func (s *HostUpdateSuite) TestUpdateLogs(t *c.C) {
	if testCluster == nil {
		t.Skip("cannot boot new hosts")
	}
	instance := s.addHost(t)
	defer s.removeHost(t, instance)
	httpClient := &http.Client{Transport: &http.Transport{Dial: dialer.Retry.Dial}}
	client := cluster.NewHost(instance.ID, fmt.Sprintf("http://%s:1113", instance.IP), httpClient)

	// start partial logger job
	cmd := exec.JobUsingHost(
		client,
		exec.DockerImage(imageURIs["test-apps"]),
		&host.Job{
			Config: host.ContainerConfig{Cmd: []string{"/bin/partial-logger"}},
			Metadata: map[string]string{
				"flynn-controller.app": "partial-logger",
			},
		},
	)
	t.Assert(cmd.Start(), c.IsNil)
	defer cmd.Kill()

	// wait for partial line
	_, err := s.discoverdClient(t).Instances("partial-logger", 10*time.Second)
	t.Assert(err, c.IsNil)

	// update flynn-host
	pid, err := client.Update("/usr/local/bin/flynn-host", "daemon", "--id", cmd.HostID)
	t.Assert(err, c.IsNil)
	// update the pid file so removeHost works
	t.Assert(instance.Run(fmt.Sprintf("echo -n %d | sudo tee /var/run/flynn-host.pid", pid), nil), c.IsNil)

	// finish logging
	t.Assert(client.SignalJob(cmd.Job.ID, int(syscall.SIGUSR1)), c.IsNil)

	// check we get a single log line
	logc, err := logaggc.New("")
	t.Assert(err, c.IsNil)
	log, err := logc.GetLog("partial-logger", &logaggc.LogOpts{Follow: true})
	t.Assert(err, c.IsNil)
	defer log.Close()
	msgs := make(chan *logaggc.Message)
	go func() {
		defer close(msgs)
		dec := json.NewDecoder(log)
		for {
			var msg logaggc.Message
			if err := dec.Decode(&msg); err != nil {
				debugf(t, "error decoding message: %s", err)
				return
			}
			msgs <- &msg
		}
	}()
	for {
		select {
		case msg, ok := <-msgs:
			if !ok {
				t.Fatal("error getting log")
			}
			if msg.Stream == "stdout" {
				t.Assert(msg.Msg, c.Equals, "hello world")
				return
			}
		case <-time.After(10 * time.Second):
			t.Fatal("timed out waiting for log")
		}
	}
}
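// The test above assumes the test-apps image ships /bin/partial-logger, which
// writes a partial stdout line, waits for SIGUSR1, then finishes the line so
// that exactly one "hello world" message reaches the log aggregator. The
// program below is a hypothetical sketch of that behaviour inferred from the
// test, not the actual test-apps source; registration of the "partial-logger"
// service with discoverd (which the test also waits for) is omitted.
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Emit a partial line (no trailing newline) so the host is holding an
	// incomplete log message while flynn-host is updated.
	fmt.Print("hello ")

	// Block until the test signals that logging should finish.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGUSR1)
	<-sig

	// Complete the line; the aggregator should now deliver a single
	// "hello world" stdout message.
	fmt.Println("world")
}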
func runBootstrapBackup(manifest []byte, backupFile string, ch chan *bootstrap.StepInfo, cfg bootstrap.Config) error { defer close(ch) f, err := os.Open(backupFile) if err != nil { return fmt.Errorf("error opening backup file: %s", err) } defer f.Close() tr := tar.NewReader(f) getFile := func(name string) (io.Reader, error) { rewound := false var res io.Reader for { header, err := tr.Next() if err == io.EOF && !rewound { if _, err := f.Seek(0, os.SEEK_SET); err != nil { return nil, fmt.Errorf("error seeking in backup file: %s", err) } rewound = true tr = tar.NewReader(f) continue } else if err != nil { return nil, fmt.Errorf("error finding %s in backup file: %s", name, err) } if path.Base(header.Name) != name { continue } if strings.HasSuffix(name, ".gz") { res, err = gzip.NewReader(tr) if err != nil { return nil, fmt.Errorf("error opening %s from backup file: %s", name, err) } } else { res = tr } break } return res, nil } var data struct { Discoverd, Flannel, Postgres, MariaDB, MongoDB, Controller *ct.ExpandedFormation } jsonData, err := getFile("flynn.json") if err != nil { return err } if jsonData == nil { return fmt.Errorf("did not file flynn.json in backup file") } if err := json.NewDecoder(jsonData).Decode(&data); err != nil { return fmt.Errorf("error decoding backup data: %s", err) } db, err := getFile("postgres.sql.gz") if err != nil { return err } if db == nil { return fmt.Errorf("did not find postgres.sql.gz in backup file") } // add buffer to the end of the SQL import containing commands that rewrite data in the controller db sqlBuf := &bytes.Buffer{} db = io.MultiReader(db, sqlBuf) sqlBuf.WriteString(fmt.Sprintf("\\connect %s\n", data.Controller.Release.Env["PGDATABASE"])) sqlBuf.WriteString(` CREATE FUNCTION pg_temp.json_object_update_key( "json" jsonb, "key_to_set" TEXT, "value_to_set" TEXT ) RETURNS jsonb LANGUAGE sql IMMUTABLE STRICT AS $function$ SELECT ('{' || string_agg(to_json("key") || ':' || "value", ',') || '}')::jsonb FROM (SELECT * FROM json_each("json"::json) WHERE "key" <> "key_to_set" UNION ALL SELECT "key_to_set", to_json("value_to_set")) AS "fields" $function$; `) type manifestStep struct { ID string Artifacts []*ct.Artifact Artifact *ct.Artifact Release struct { Env map[string]string Processes map[string]ct.ProcessType } } var manifestSteps []*manifestStep if err := json.Unmarshal(manifest, &manifestSteps); err != nil { return fmt.Errorf("error decoding manifest json: %s", err) } manifestStepMap := make(map[string]bootstrap.Step, len(manifestSteps)) steps, err := bootstrap.UnmarshalManifest(manifest, nil) if err != nil { return fmt.Errorf("error decoding manifest json: %s", err) } for _, step := range steps { manifestStepMap[step.StepMeta.ID] = step } artifacts := make(map[string]*ct.Artifact) updateProcArgs := func(f *ct.ExpandedFormation, step *manifestStep) { for typ, proc := range step.Release.Processes { p := f.Release.Processes[typ] p.Args = proc.Args f.Release.Processes[typ] = p } } updateVolumes := func(f *ct.ExpandedFormation, step *manifestStep) { for typ, proc := range step.Release.Processes { p := f.Release.Processes[typ] p.Volumes = proc.Volumes f.Release.Processes[typ] = p } } for _, step := range manifestSteps { switch step.ID { case "discoverd": updateVolumes(data.Discoverd, step) case "postgres": updateProcArgs(data.Postgres, step) updateVolumes(data.Postgres, step) case "controller": updateProcArgs(data.Controller, step) case "mariadb": if data.MariaDB != nil { updateProcArgs(data.MariaDB, step) updateVolumes(data.MariaDB, step) } case 
"mongodb": if data.MongoDB != nil { updateProcArgs(data.MongoDB, step) updateVolumes(data.MongoDB, step) } } if step.Artifact != nil { artifacts[step.ID] = step.Artifact } else if len(step.Artifacts) > 0 { artifacts[step.ID] = step.Artifacts[0] } } data.Discoverd.Artifacts = []*ct.Artifact{artifacts["discoverd"]} data.Discoverd.Release.Env["DISCOVERD_PEERS"] = "{{ range $ip := .SortedHostIPs }}{{ $ip }}:1111,{{ end }}" data.Postgres.Artifacts = []*ct.Artifact{artifacts["postgres"]} data.Flannel.Artifacts = []*ct.Artifact{artifacts["flannel"]} data.Controller.Artifacts = []*ct.Artifact{artifacts["controller"]} if data.MariaDB != nil { data.MariaDB.Artifacts = []*ct.Artifact{artifacts["mariadb"]} } if data.MongoDB != nil { data.MongoDB.Artifacts = []*ct.Artifact{artifacts["mongodb"]} } // set TELEMETRY_CLUSTER_ID telemetryClusterID := random.UUID() sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = jsonb_set(env, '{TELEMETRY_CLUSTER_ID}', '%q') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'controller'); `, telemetryClusterID)) data.Controller.Release.Env["TELEMETRY_CLUSTER_ID"] = telemetryClusterID // set TELEMETRY_BOOTSTRAP_ID if unset if data.Controller.Release.Env["TELEMETRY_BOOTSTRAP_ID"] == "" { sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = jsonb_set(env, '{TELEMETRY_BOOTSTRAP_ID}', '%q') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'controller'); `, telemetryClusterID)) data.Controller.Release.Env["TELEMETRY_BOOTSTRAP_ID"] = telemetryClusterID } step := func(id, name string, action bootstrap.Action) bootstrap.Step { if ra, ok := action.(*bootstrap.RunAppAction); ok { ra.ID = id } return bootstrap.Step{ StepMeta: bootstrap.StepMeta{ID: id, Action: name}, Action: action, } } // ensure flannel has NETWORK set if required if network := os.Getenv("FLANNEL_NETWORK"); network != "" { data.Flannel.Release.Env["NETWORK"] = network sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, 'NETWORK', '%s') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'flannel'); `, network)) } // ensure controller / gitreceive have tmp volumes sqlBuf.WriteString(` UPDATE releases SET processes = jsonb_set(processes, '{web,volumes}', '[{"path": "/tmp", "delete_on_stop": true}]') WHERE release_id IN (SELECT release_id FROM apps WHERE name = 'controller'); UPDATE releases SET processes = jsonb_set(processes, '{app,volumes}', '[{"path": "/tmp", "delete_on_stop": true}]') WHERE release_id IN (SELECT release_id FROM apps WHERE name = 'gitreceive'); `) // update the SINGLETON environment variable for database appliances // (which includes updating legacy appliances which had SINGLETON set // on the database type rather than the release) singleton := strconv.FormatBool(cfg.Singleton) data.Postgres.Release.Env["SINGLETON"] = singleton sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = jsonb_set(env, '{SINGLETON}', '%q') WHERE release_id IN (SELECT release_id FROM apps WHERE name IN ('postgres', 'mariadb', 'mongodb')); `, singleton)) if data.MariaDB != nil { data.MariaDB.Release.Env["SINGLETON"] = singleton delete(data.MariaDB.Release.Processes["mariadb"].Env, "SINGLETON") sqlBuf.WriteString(` UPDATE releases SET processes = jsonb_set(processes, '{mariadb,env}', (processes #> '{mariadb,env}')::jsonb - 'SINGLETON') WHERE release_id IN (SELECT release_id FROM apps WHERE name = 'mariadb'); `) } if data.MongoDB != nil { data.MongoDB.Release.Env["SINGLETON"] = singleton 
delete(data.MongoDB.Release.Processes["mongodb"].Env, "SINGLETON") sqlBuf.WriteString(` UPDATE releases SET processes = jsonb_set(processes, '{mongodb,env}', (processes #> '{mongodb,env}')::jsonb - 'SINGLETON') WHERE release_id IN (SELECT release_id FROM apps WHERE name = 'mongodb'); `) } // modify app scale based on whether we are booting // a singleton or HA cluster var scale map[string]map[string]int if cfg.Singleton { scale = map[string]map[string]int{ "postgres": {"postgres": 1, "web": 1}, "mariadb": {"web": 1}, "mongodb": {"web": 1}, "controller": {"web": 1, "worker": 1}, "redis": {"web": 1}, "blobstore": {"web": 1}, "gitreceive": {"app": 1}, "docker-receive": {"app": 1}, "logaggregator": {"app": 1}, "dashboard": {"web": 1}, "status": {"web": 1}, } data.Postgres.Processes["postgres"] = 1 data.Postgres.Processes["web"] = 1 if data.MariaDB != nil { data.MariaDB.Processes["mariadb"] = 1 data.MariaDB.Processes["web"] = 1 } if data.MongoDB != nil { data.MongoDB.Processes["mongodb"] = 1 data.MongoDB.Processes["web"] = 1 } } else { scale = map[string]map[string]int{ "postgres": {"postgres": 3, "web": 2}, "mariadb": {"web": 2}, "mongodb": {"web": 2}, "controller": {"web": 2, "worker": 2}, "redis": {"web": 2}, "blobstore": {"web": 2}, "gitreceive": {"app": 2}, "docker-receive": {"app": 2}, "logaggregator": {"app": 2}, "dashboard": {"web": 2}, "status": {"web": 2}, } data.Postgres.Processes["postgres"] = 3 data.Postgres.Processes["web"] = 2 if data.MariaDB != nil { data.MariaDB.Processes["mariadb"] = 3 data.MariaDB.Processes["web"] = 2 } if data.MongoDB != nil { data.MongoDB.Processes["mongodb"] = 3 data.MongoDB.Processes["web"] = 2 } } for app, procs := range scale { for typ, count := range procs { sqlBuf.WriteString(fmt.Sprintf(` UPDATE formations SET processes = jsonb_set(processes, '{%s}', '%d') WHERE release_id = (SELECT release_id FROM apps WHERE name = '%s'); `, typ, count, app)) } } // start discoverd/flannel/postgres systemSteps := bootstrap.Manifest{ step("discoverd", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Discoverd, }), step("flannel", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Flannel, }), step("wait-hosts", "wait-hosts", &bootstrap.WaitHostsAction{}), step("postgres", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Postgres, }), step("postgres-wait", "sirenia-wait", &bootstrap.SireniaWaitAction{ Service: "postgres", }), } state, err := systemSteps.Run(ch, cfg) if err != nil { return err } // set DISCOVERD_PEERS in release sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, 'DISCOVERD_PEERS', '%s') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'discoverd'); `, state.StepData["discoverd"].(*bootstrap.RunAppState).Release.Env["DISCOVERD_PEERS"])) // make sure STATUS_KEY has the correct value in the dashboard release sqlBuf.WriteString(` UPDATE releases SET env = jsonb_set(env, '{STATUS_KEY}', ( SELECT env->'AUTH_KEY' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'status') )) WHERE release_id = (SELECT release_id FROM apps WHERE name = 'dashboard'); `) // load data into postgres cmd := exec.JobUsingHost(state.Hosts[0], artifacts["postgres"], nil) cmd.Args = []string{"psql"} cmd.Env = map[string]string{ "PGHOST": "leader.postgres.discoverd", "PGUSER": "******", "PGDATABASE": "postgres", "PGPASSWORD": data.Postgres.Release.Env["PGPASSWORD"], } cmd.Stdin = db meta := bootstrap.StepMeta{ID: "restore", Action: "restore-postgres"} ch <- 
&bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err := cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running psql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} // start controller API data.Controller.Processes = map[string]int{"web": 1} _, err = bootstrap.Manifest{ step("controller", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), }.RunWithState(ch, state) // wait for controller to come up meta = bootstrap.StepMeta{ID: "wait-controller", Action: "wait-controller"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} controllerInstances, err := discoverd.GetInstances("controller", 30*time.Second) if err != nil { return fmt.Errorf("error getting controller instance: %s", err) } controllerKey := data.Controller.Release.Env["AUTH_KEY"] client, err := controller.NewClient("http://"+controllerInstances[0].Addr, controllerKey) if err != nil { return err } // start mariadb and load data if it was present in the backup. mysqldb, err := getFile("mysql.sql.gz") if err == nil && data.MariaDB != nil { _, err = bootstrap.Manifest{ step("mariadb", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.MariaDB, }), step("mariadb-wait", "sirenia-wait", &bootstrap.SireniaWaitAction{ Service: "mariadb", }), }.RunWithState(ch, state) if err != nil { return err } // ensure the formation is correct in the database if err := client.PutFormation(data.MariaDB.Formation()); err != nil { return fmt.Errorf("error updating mariadb formation: %s", err) } cmd = exec.JobUsingHost(state.Hosts[0], artifacts["mariadb"], nil) cmd.Args = []string{"mysql", "-u", "flynn", "-h", "leader.mariadb.discoverd"} cmd.Env = map[string]string{ "MYSQL_PWD": data.MariaDB.Release.Env["MYSQL_PWD"], } cmd.Stdin = mysqldb meta = bootstrap.StepMeta{ID: "restore", Action: "restore-mariadb"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running mysql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} } // start mongodb and load data if it was present in the backup. 
mongodb, err := getFile("mongodb.archive.gz") if err == nil && data.MongoDB != nil { _, err = bootstrap.Manifest{ step("mongodb", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.MongoDB, }), step("mongodb-wait", "sirenia-wait", &bootstrap.SireniaWaitAction{ Service: "mongodb", }), }.RunWithState(ch, state) if err != nil { return err } // ensure the formation is correct in the database if err := client.PutFormation(data.MongoDB.Formation()); err != nil { return fmt.Errorf("error updating mongodb formation: %s", err) } cmd = exec.JobUsingHost(state.Hosts[0], artifacts["mongodb"], nil) cmd.Args = []string{"mongorestore", "-h", "leader.mongodb.discoverd", "-u", "flynn", "-p", data.MongoDB.Release.Env["MONGO_PWD"], "--archive"} cmd.Stdin = mongodb meta = bootstrap.StepMeta{ID: "restore", Action: "restore-mongodb"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running mongodb restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} } // get blobstore config blobstoreRelease, err := client.GetAppRelease("blobstore") if err != nil { return fmt.Errorf("error getting blobstore release: %s", err) } blobstoreFormation, err := client.GetExpandedFormation("blobstore", blobstoreRelease.ID) if err != nil { return fmt.Errorf("error getting blobstore expanded formation: %s", err) } state.SetControllerKey(controllerKey) ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} // start the blobstore blobstoreFormation.Artifacts = []*ct.Artifact{artifacts["blobstore"]} _, err = bootstrap.Manifest{ step("blobstore", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: blobstoreFormation, }), step("blobstore-wait", "wait", &bootstrap.WaitAction{ URL: "http://blobstore.discoverd", Status: 200, }), }.RunWithState(ch, state) if err != nil { return err } // now that the controller and blobstore are up and controller // migrations have run (so we know artifacts have a manifest column), // migrate all artifacts to Flynn images jsonb := func(v interface{}) []byte { data, _ := json.Marshal(v) return data } sqlBuf.Reset() for _, step := range manifestSteps { artifact, ok := artifacts[step.ID] if !ok { continue } // update current artifact in database for service sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%s', type = 'flynn', manifest = '%s', hashes = '%s', size = %d, layer_url_template = '%s', meta = '%s' WHERE artifact_id = ( SELECT artifact_id FROM release_artifacts WHERE release_id = ( SELECT release_id FROM apps WHERE name = '%s' ) );`, artifact.URI, jsonb(&artifact.RawManifest), jsonb(artifact.Hashes), artifact.Size, artifact.LayerURLTemplate, jsonb(artifact.Meta), step.ID)) } // create the slugbuilder artifact if gitreceive still references it by // URI (in which case there is no slugbuilder artifact in the database) slugBuilder := artifacts["slugbuilder-image"] sqlBuf.WriteString(fmt.Sprintf(` DO $$ BEGIN IF (SELECT env->>'SLUGBUILDER_IMAGE_ID' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive')) IS NULL THEN INSERT INTO artifacts (artifact_id, type, uri, manifest, hashes, size, layer_url_template, meta) VALUES ('%s', 'flynn', '%s', '%s', '%s', %d, '%s', '%s'); END IF; 
END; $$;`, random.UUID(), slugBuilder.URI, jsonb(&slugBuilder.RawManifest), jsonb(slugBuilder.Hashes), slugBuilder.Size, slugBuilder.LayerURLTemplate, jsonb(slugBuilder.Meta))) // create the slugrunner artifact if it doesn't exist (which can be the // case if no apps were deployed with git push in older clusters where // it was created lazily) slugRunner := artifacts["slugrunner-image"] sqlBuf.WriteString(fmt.Sprintf(` DO $$ BEGIN IF (SELECT env->>'SLUGRUNNER_IMAGE_ID' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive')) IS NULL THEN IF NOT EXISTS (SELECT 1 FROM artifacts WHERE uri = (SELECT env->>'SLUGRUNNER_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive'))) THEN INSERT INTO artifacts (artifact_id, type, uri, manifest, hashes, size, layer_url_template, meta) VALUES ('%s', 'flynn', '%s', '%s', '%s', %d, '%s', '%s'); END IF; END IF; END; $$;`, random.UUID(), slugRunner.URI, jsonb(&slugRunner.RawManifest), jsonb(slugRunner.Hashes), slugRunner.Size, slugRunner.LayerURLTemplate, jsonb(slugRunner.Meta))) // update slug artifacts currently being referenced by gitreceive // (which will also update all current user releases to use the // latest slugrunner) for _, name := range []string{"slugbuilder", "slugrunner"} { artifact := artifacts[name+"-image"] sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%[1]s', type = 'flynn', manifest = '%[2]s', hashes = '%[3]s', size = %[4]d, layer_url_template = '%[5]s', meta = '%[6]s' WHERE artifact_id = (SELECT (env->>'%[7]s_IMAGE_ID')::uuid FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive')) OR uri = (SELECT env->>'%[7]s_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive'));`, artifact.URI, jsonb(&artifact.RawManifest), jsonb(artifact.Hashes), artifact.Size, artifact.LayerURLTemplate, jsonb(artifact.Meta), strings.ToUpper(name))) } // update the URI of redis artifacts currently being referenced by // the redis app (which will also update all current redis resources // to use the latest redis image) redisImage := artifacts["redis-image"] sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%s', type = 'flynn', manifest = '%s', hashes = '%s', size = %d, layer_url_template = '%s', meta = '%s' WHERE artifact_id = (SELECT (env->>'REDIS_IMAGE_ID')::uuid FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'redis')) OR uri = (SELECT env->>'REDIS_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'redis'));`, redisImage.URI, jsonb(&redisImage.RawManifest), jsonb(redisImage.Hashes), redisImage.Size, redisImage.LayerURLTemplate, jsonb(redisImage.Meta))) // ensure the image ID environment variables are set for legacy apps // which use image URI variables for _, name := range []string{"redis", "slugbuilder", "slugrunner"} { sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = jsonb_set(env, '{%[1]s_IMAGE_ID}', ('"' || (SELECT artifact_id::text FROM artifacts WHERE uri = '%[2]s') || '"')::jsonb, true) WHERE env->>'%[1]s_IMAGE_URI' IS NOT NULL;`, strings.ToUpper(name), artifacts[name+"-image"].URI)) } // run the above artifact migration SQL against the controller database cmd = exec.JobUsingHost(state.Hosts[0], artifacts["postgres"], nil) cmd.Args = []string{"psql", "--echo-queries"} cmd.Env = map[string]string{ "PGHOST": "leader.postgres.discoverd", "PGUSER": data.Controller.Release.Env["PGUSER"], "PGDATABASE": 
data.Controller.Release.Env["PGDATABASE"], "PGPASSWORD": data.Controller.Release.Env["PGPASSWORD"], } cmd.Stdin = sqlBuf meta = bootstrap.StepMeta{ID: "migrate-artifacts", Action: "migrate-artifacts"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error migrating artifacts: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } // determine if there are any slugs or docker images which need to be // converted to Flynn images migrateSlugs := false migrateDocker := false artifactList, err := client.ArtifactList() if err != nil { return fmt.Errorf("error listing artifacts: %s", err) } for _, artifact := range artifactList { if artifact.Type == ct.DeprecatedArtifactTypeFile { migrateSlugs = true } if artifact.Type == ct.DeprecatedArtifactTypeDocker && artifact.Meta["docker-receive.repository"] != "" { migrateDocker = true } if migrateSlugs && migrateDocker { break } } runMigrator := func(cmd *exec.Cmd) error { out, err := cmd.StdoutPipe() if err != nil { return err } done := make(chan struct{}) go func() { defer close(done) s := bufio.NewScanner(out) for s.Scan() { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "info", StepData: s.Text(), Timestamp: time.Now().UTC(), } } }() err = cmd.Run() select { case <-done: case <-time.After(time.Second): } return err } if migrateSlugs { cmd = exec.JobUsingHost(state.Hosts[0], artifacts["slugbuilder-image"], nil) cmd.Args = []string{"/bin/slug-migrator"} cmd.Env = map[string]string{ "CONTROLLER_KEY": data.Controller.Release.Env["AUTH_KEY"], "FLYNN_POSTGRES": data.Controller.Release.Env["FLYNN_POSTGRES"], "PGHOST": "leader.postgres.discoverd", "PGUSER": data.Controller.Release.Env["PGUSER"], "PGDATABASE": data.Controller.Release.Env["PGDATABASE"], "PGPASSWORD": data.Controller.Release.Env["PGPASSWORD"], } if err := runMigrator(cmd); err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error migrating slugs: %s", err), Err: err, Timestamp: time.Now().UTC(), } return err } } if migrateDocker { // start docker-receive dockerRelease, err := client.GetAppRelease("docker-receive") if err != nil { return fmt.Errorf("error getting docker-receive release: %s", err) } dockerFormation, err := client.GetExpandedFormation("docker-receive", dockerRelease.ID) if err != nil { return fmt.Errorf("error getting docker-receive expanded formation: %s", err) } dockerFormation.Artifacts = []*ct.Artifact{artifacts["docker-receive"]} _, err = bootstrap.Manifest{ step("docker-receive", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: dockerFormation, }), step("docker-receive-wait", "wait", &bootstrap.WaitAction{ URL: "http://docker-receive.discoverd/v2/", Status: 401, }), }.RunWithState(ch, state) if err != nil { return err } // run the docker image migrator cmd = exec.JobUsingHost(state.Hosts[0], artifacts["docker-receive"], nil) cmd.Args = []string{"/bin/docker-migrator"} cmd.Env = map[string]string{ "CONTROLLER_KEY": data.Controller.Release.Env["AUTH_KEY"], "FLYNN_POSTGRES": data.Controller.Release.Env["FLYNN_POSTGRES"], "PGHOST": "leader.postgres.discoverd", "PGUSER": data.Controller.Release.Env["PGUSER"], "PGDATABASE": data.Controller.Release.Env["PGDATABASE"], "PGPASSWORD": data.Controller.Release.Env["PGPASSWORD"], } cmd.Volumes = []*ct.VolumeReq{{Path: "/tmp", DeleteOnStop: 
true}} if err := runMigrator(cmd); err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error migrating Docker images: %s", err), Err: err, Timestamp: time.Now().UTC(), } return err } } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} // start scheduler and enable cluster monitor data.Controller.Processes = map[string]int{"scheduler": 1} // only start one scheduler instance schedulerProcess := data.Controller.Release.Processes["scheduler"] schedulerProcess.Omni = false data.Controller.Release.Processes["scheduler"] = schedulerProcess _, err = bootstrap.Manifest{ step("controller-scheduler", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), step("status", "status-check", &bootstrap.StatusCheckAction{ URL: "http://status-web.discoverd", Timeout: 600, }), step("cluster-monitor", "cluster-monitor", &bootstrap.ClusterMonitorAction{ Enabled: true, }), }.RunWithState(ch, state) if err != nil { return err } // mariadb and mongodb steps require the controller key state.StepData["controller-key"] = &bootstrap.RandomData{controllerKey} // deploy mariadb if it wasn't restored from the backup if data.MariaDB == nil { steps := bootstrap.Manifest{ manifestStepMap["mariadb-password"], manifestStepMap["mariadb"], manifestStepMap["add-mysql-provider"], manifestStepMap["mariadb-wait"], } if _, err := steps.RunWithState(ch, state); err != nil { return fmt.Errorf("error deploying mariadb: %s", err) } } // deploy mongodb if it wasn't restored from the backup if data.MongoDB == nil { steps := bootstrap.Manifest{ manifestStepMap["mongodb-password"], manifestStepMap["mongodb"], manifestStepMap["add-mongodb-provider"], manifestStepMap["mongodb-wait"], } if _, err := steps.RunWithState(ch, state); err != nil { return fmt.Errorf("error deploying mongodb: %s", err) } } // deploy docker-receive if it wasn't in the backup if _, err := client.GetApp("docker-receive"); err == controller.ErrNotFound { routes, err := client.RouteList("controller") if len(routes) == 0 { err = errors.New("no routes found") } if err != nil { return fmt.Errorf("error listing controller routes: %s", err) } for _, r := range routes { if r.Domain == fmt.Sprintf("controller.%s", data.Controller.Release.Env["DEFAULT_ROUTE_DOMAIN"]) { state.StepData["controller-cert"] = &tlscert.Cert{ Cert: r.Certificate.Cert, PrivateKey: r.Certificate.Key, } break } } steps := bootstrap.Manifest{ manifestStepMap["docker-receive-secret"], manifestStepMap["docker-receive"], manifestStepMap["docker-receive-route"], manifestStepMap["docker-receive-wait"], } if _, err := steps.RunWithState(ch, state); err != nil { return fmt.Errorf("error deploying docker-receive: %s", err) } } return nil }
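// A minimal sketch of a caller for runBootstrapBackup above, assuming the
// same imports as this file plus "io/ioutil" and "log". The name
// restoreFromBackup is hypothetical. The progress goroutine exits when
// runBootstrapBackup closes ch on return; other bootstrap.Config fields
// (cluster URL, host IPs, timeouts) are omitted here and would need to be
// filled in by a real caller.
func restoreFromBackup(manifestPath, backupPath string, singleton bool) error {
	manifest, err := ioutil.ReadFile(manifestPath)
	if err != nil {
		return fmt.Errorf("error reading manifest: %s", err)
	}

	ch := make(chan *bootstrap.StepInfo)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for step := range ch {
			if step.State == "error" {
				log.Printf("step %s (%s) failed: %s", step.StepMeta.ID, step.StepMeta.Action, step.Error)
				continue
			}
			log.Printf("step %s (%s): %s", step.StepMeta.ID, step.StepMeta.Action, step.State)
		}
	}()

	err = runBootstrapBackup(manifest, backupPath, ch, bootstrap.Config{Singleton: singleton})
	<-done
	return err
}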
func runBootstrapBackup(manifest []byte, backupFile string, ch chan *bootstrap.StepInfo, cfg bootstrap.Config) error { defer close(ch) f, err := os.Open(backupFile) if err != nil { return fmt.Errorf("error opening backup file: %s", err) } defer f.Close() tr := tar.NewReader(f) getFile := func(name string) (io.Reader, error) { rewound := false var res io.Reader for { header, err := tr.Next() if err == io.EOF && !rewound { if _, err := f.Seek(0, os.SEEK_SET); err != nil { return nil, fmt.Errorf("error seeking in backup file: %s", err) } rewound = true tr = tar.NewReader(f) continue } else if err != nil { return nil, fmt.Errorf("error finding %s in backup file: %s", name, err) } if path.Base(header.Name) != name { continue } if strings.HasSuffix(name, ".gz") { res, err = gzip.NewReader(tr) if err != nil { return nil, fmt.Errorf("error opening %s from backup file: %s", name, err) } } else { res = tr } break } return res, nil } var data struct { Discoverd, Flannel, Postgres, MariaDB, MongoDB, Controller *ct.ExpandedFormation } jsonData, err := getFile("flynn.json") if err != nil { return err } if jsonData == nil { return fmt.Errorf("did not file flynn.json in backup file") } if err := json.NewDecoder(jsonData).Decode(&data); err != nil { return fmt.Errorf("error decoding backup data: %s", err) } db, err := getFile("postgres.sql.gz") if err != nil { return err } if db == nil { return fmt.Errorf("did not find postgres.sql.gz in backup file") } // add buffer to the end of the SQL import containing commands that rewrite data in the controller db sqlBuf := &bytes.Buffer{} db = io.MultiReader(db, sqlBuf) sqlBuf.WriteString(fmt.Sprintf("\\connect %s\n", data.Controller.Release.Env["PGDATABASE"])) sqlBuf.WriteString(` CREATE FUNCTION pg_temp.json_object_update_key( "json" jsonb, "key_to_set" TEXT, "value_to_set" TEXT ) RETURNS jsonb LANGUAGE sql IMMUTABLE STRICT AS $function$ SELECT ('{' || string_agg(to_json("key") || ':' || "value", ',') || '}')::jsonb FROM (SELECT * FROM json_each("json"::json) WHERE "key" <> "key_to_set" UNION ALL SELECT "key_to_set", to_json("value_to_set")) AS "fields" $function$; `) type manifestStep struct { ID string Artifact struct { URI string } Release struct { Env map[string]string Processes map[string]ct.ProcessType } } var manifestSteps []*manifestStep if err := json.Unmarshal(manifest, &manifestSteps); err != nil { return fmt.Errorf("error decoding manifest json: %s", err) } artifactURIs := make(map[string]string) updateProcArgs := func(f *ct.ExpandedFormation, step *manifestStep) { for typ, proc := range step.Release.Processes { p := f.Release.Processes[typ] p.Args = proc.Args f.Release.Processes[typ] = p } } for _, step := range manifestSteps { switch step.ID { case "postgres": updateProcArgs(data.Postgres, step) case "controller": updateProcArgs(data.Controller, step) case "mariadb": if data.MariaDB != nil { updateProcArgs(data.MariaDB, step) } case "mongodb": if data.MongoDB != nil { updateProcArgs(data.MongoDB, step) } } if step.Artifact.URI != "" { artifactURIs[step.ID] = step.Artifact.URI // update current artifact in database for service, taking care to // check the database version as migration 15 changed the way // artifacts are related to releases in the database sqlBuf.WriteString(fmt.Sprintf(` DO $$ BEGIN IF (SELECT MAX(id) FROM schema_migrations) < 15 THEN UPDATE artifacts SET uri = '%[1]s' WHERE artifact_id = ( SELECT artifact_id FROM releases WHERE release_id = ( SELECT release_id FROM apps WHERE name = '%[2]s' ) ); ELSE UPDATE artifacts SET uri = 
'%[1]s' WHERE type = 'docker' AND artifact_id = ( SELECT artifact_id FROM release_artifacts WHERE release_id = ( SELECT release_id FROM apps WHERE name = '%[2]s' ) ); END IF; END; $$;`, step.Artifact.URI, step.ID)) } } data.Discoverd.ImageArtifact.URI = artifactURIs["discoverd"] data.Discoverd.Release.Env["DISCOVERD_PEERS"] = "{{ range $ip := .SortedHostIPs }}{{ $ip }}:1111,{{ end }}" data.Postgres.ImageArtifact.URI = artifactURIs["postgres"] data.Flannel.ImageArtifact.URI = artifactURIs["flannel"] data.Controller.ImageArtifact.URI = artifactURIs["controller"] if data.MariaDB != nil { data.MariaDB.ImageArtifact.URI = artifactURIs["mariadb"] if data.MariaDB.Processes["mariadb"] == 0 { // skip mariadb if it wasn't scaled up in the backup data.MariaDB = nil } } if data.MongoDB != nil { data.MongoDB.ImageArtifact.URI = artifactURIs["mongodb"] if data.MongoDB.Processes["mongodb"] == 0 { // skip mongodb if it wasn't scaled up in the backup data.MongoDB = nil } } // create the slugbuilder artifact if gitreceive still references // SLUGBUILDER_IMAGE_URI (in which case there is no slugbuilder // artifact in the database) sqlBuf.WriteString(fmt.Sprintf(` DO $$ BEGIN IF (SELECT env->>'SLUGBUILDER_IMAGE_ID' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive')) IS NULL THEN INSERT INTO artifacts (artifact_id, type, uri) VALUES ('%s', 'docker', '%s'); END IF; END; $$;`, random.UUID(), artifactURIs["slugbuilder-image"])) // update the URI of slug artifacts currently being referenced by // gitreceive (which will also update all current user releases // to use the latest slugrunner) for _, name := range []string{"slugbuilder", "slugrunner"} { sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%[1]s' WHERE artifact_id = (SELECT (env->>'%[2]s_IMAGE_ID')::uuid FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive')) OR uri = (SELECT env->>'%[2]s_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive'));`, artifactURIs[name+"-image"], strings.ToUpper(name))) } // update the URI of redis artifacts currently being referenced by // the redis app (which will also update all current redis // resources to use the latest redis image) sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%s' WHERE artifact_id = (SELECT (env->>'REDIS_IMAGE_ID')::uuid FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'redis')) OR uri = (SELECT env->>'REDIS_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'redis'));`, artifactURIs["redis-image"])) // ensure the image ID environment variables are set for legacy apps // which use image URI variables for _, name := range []string{"redis", "slugbuilder", "slugrunner"} { sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, '%[1]s_IMAGE_ID', (SELECT artifact_id::text FROM artifacts WHERE uri = '%[2]s')) WHERE env->>'%[1]s_IMAGE_URI' IS NOT NULL;`, strings.ToUpper(name), artifactURIs[name+"-image"])) } step := func(id, name string, action bootstrap.Action) bootstrap.Step { if ra, ok := action.(*bootstrap.RunAppAction); ok { ra.ID = id } return bootstrap.Step{ StepMeta: bootstrap.StepMeta{ID: id, Action: name}, Action: action, } } // start discoverd/flannel/postgres/mariadb cfg.Singleton = data.Postgres.Release.Env["SINGLETON"] == "true" systemSteps := bootstrap.Manifest{ step("discoverd", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Discoverd, }), 
step("flannel", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Flannel, }), step("wait-hosts", "wait-hosts", &bootstrap.WaitHostsAction{}), step("postgres", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Postgres, }), step("postgres-wait", "wait", &bootstrap.WaitAction{ URL: "http://postgres-api.discoverd/ping", }), } // Only run up MariaDB if it's in the backup if data.MariaDB != nil { systemSteps = append(systemSteps, step("mariadb", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.MariaDB, })) systemSteps = append(systemSteps, step("mariadb-wait", "wait", &bootstrap.WaitAction{ URL: "http://mariadb-api.discoverd/ping", })) } // Only run up MongoDB if it's in the backup if data.MongoDB != nil { systemSteps = append(systemSteps, step("mongodb", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.MongoDB, })) systemSteps = append(systemSteps, step("mongodb-wait", "wait", &bootstrap.WaitAction{ URL: "http://mongodb-api.discoverd/ping", })) } state, err := systemSteps.Run(ch, cfg) if err != nil { return err } // set DISCOVERD_PEERS in release sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, 'DISCOVERD_PEERS', '%s') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'discoverd') `, state.StepData["discoverd"].(*bootstrap.RunAppState).Release.Env["DISCOVERD_PEERS"])) // load data into postgres cmd := exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.Postgres.ImageArtifact.Type, URI: data.Postgres.ImageArtifact.URI}, nil) cmd.Args = []string{"psql"} cmd.Env = map[string]string{ "PGHOST": "leader.postgres.discoverd", "PGUSER": "******", "PGDATABASE": "postgres", "PGPASSWORD": data.Postgres.Release.Env["PGPASSWORD"], } cmd.Stdin = db meta := bootstrap.StepMeta{ID: "restore", Action: "restore-postgres"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err := cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running psql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} var mysqldb io.Reader if data.MariaDB != nil { mysqldb, err = getFile("mysql.sql.gz") if err != nil { return err } } // load data into mariadb if it was present in the backup. 
if mysqldb != nil && data.MariaDB != nil { cmd = exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.MariaDB.ImageArtifact.Type, URI: data.MariaDB.ImageArtifact.URI}, nil) cmd.Args = []string{"mysql", "-u", "flynn", "-h", "leader.mariadb.discoverd"} cmd.Env = map[string]string{ "MYSQL_PWD": data.MariaDB.Release.Env["MYSQL_PWD"], } cmd.Stdin = mysqldb meta = bootstrap.StepMeta{ID: "restore", Action: "restore-mariadb"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running mysql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} } var mongodb io.Reader if data.MongoDB != nil { mongodb, err = getFile("mongodb.archive.gz") if err != nil { return err } } // load data into mongodb if it was present in the backup. if mongodb != nil && data.MongoDB != nil { cmd = exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.MongoDB.ImageArtifact.Type, URI: data.MongoDB.ImageArtifact.URI}, nil) cmd.Args = []string{"mongorestore", "-h", "leader.mongodb.discoverd", "-u", "flynn", "-p", data.MongoDB.Release.Env["MONGO_PWD"], "--archive"} cmd.Stdin = mongodb meta = bootstrap.StepMeta{ID: "restore", Action: "restore-mongodb"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running mongodb restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} } // start controller API data.Controller.Processes = map[string]int{"web": 1} _, err = bootstrap.Manifest{ step("controller", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), }.RunWithState(ch, state) // wait for controller to come up meta = bootstrap.StepMeta{ID: "wait-controller", Action: "wait-controller"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} controllerInstances, err := discoverd.GetInstances("controller", 30*time.Second) if err != nil { return fmt.Errorf("error getting controller instance: %s", err) } // get blobstore config client, err := controller.NewClient("http://"+controllerInstances[0].Addr, data.Controller.Release.Env["AUTH_KEY"]) if err != nil { return err } blobstoreRelease, err := client.GetAppRelease("blobstore") if err != nil { return fmt.Errorf("error getting blobstore release: %s", err) } blobstoreFormation, err := client.GetExpandedFormation("blobstore", blobstoreRelease.ID) if err != nil { return fmt.Errorf("error getting blobstore expanded formation: %s", err) } state.SetControllerKey(data.Controller.Release.Env["AUTH_KEY"]) ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} // start blobstore, scheduler, and enable cluster monitor data.Controller.Processes = map[string]int{"scheduler": 1} // only start one scheduler instance schedulerProcess := data.Controller.Release.Processes["scheduler"] schedulerProcess.Omni = false data.Controller.Release.Processes["scheduler"] = schedulerProcess _, err = bootstrap.Manifest{ step("blobstore", 
"run-app", &bootstrap.RunAppAction{ ExpandedFormation: blobstoreFormation, }), step("blobstore-wait", "wait", &bootstrap.WaitAction{ URL: "http://blobstore.discoverd", Status: 200, }), step("controller-scheduler", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), step("status", "status-check", &bootstrap.StatusCheckAction{ URL: "http://status-web.discoverd", Timeout: 600, }), step("cluster-monitor", "cluster-monitor", &bootstrap.ClusterMonitorAction{ Enabled: true, }), }.RunWithState(ch, state) if err != nil { return err } return nil }
func runBootstrapBackup(manifest []byte, backupFile string, ch chan *bootstrap.StepInfo, clusterURL string, hostIPs []string, minHosts, timeout int) error {
	defer close(ch)
	f, err := os.Open(backupFile)
	if err != nil {
		return fmt.Errorf("error opening backup file: %s", err)
	}
	defer f.Close()
	tr := tar.NewReader(f)

	var data struct {
		Discoverd, Flannel, Postgres, Controller *ct.ExpandedFormation
	}
	for {
		header, err := tr.Next()
		if err != nil {
			return fmt.Errorf("error reading backup file: %s", err)
		}
		if path.Base(header.Name) != "flynn.json" {
			continue
		}
		if err := json.NewDecoder(tr).Decode(&data); err != nil {
			return fmt.Errorf("error decoding backup data: %s", err)
		}
		break
	}

	var db io.Reader
	rewound := false
	for {
		header, err := tr.Next()
		if err == io.EOF && !rewound {
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking in backup file: %s", err)
			}
			rewound = true
			// restart scanning from the beginning of the archive
			tr = tar.NewReader(f)
			continue
		} else if err != nil {
			return fmt.Errorf("error finding db in backup file: %s", err)
		}
		if path.Base(header.Name) != "postgres.sql.gz" {
			continue
		}
		db, err = gzip.NewReader(tr)
		if err != nil {
			return fmt.Errorf("error opening db from backup file: %s", err)
		}
		break
	}
	if db == nil {
		return fmt.Errorf("did not find postgres.sql.gz in backup file")
	}

	// add buffer to the end of the SQL import containing commands that rewrite data in the controller db
	sqlBuf := &bytes.Buffer{}
	db = io.MultiReader(db, sqlBuf)
	sqlBuf.WriteString(fmt.Sprintf("\\connect %s\n", data.Controller.Release.Env["PGDATABASE"]))
	sqlBuf.WriteString(`
CREATE FUNCTION pg_temp.json_object_update_key(
  "json" jsonb,
  "key_to_set" TEXT,
  "value_to_set" TEXT
) RETURNS jsonb
LANGUAGE sql
IMMUTABLE
STRICT
AS $function$
SELECT ('{' || string_agg(to_json("key") || ':' || "value", ',') || '}')::jsonb
  FROM (SELECT *
          FROM json_each("json"::json)
         WHERE "key" <> "key_to_set"
         UNION ALL
        SELECT "key_to_set", to_json("value_to_set")) AS "fields"
$function$;
`)

	var manifestSteps []struct {
		ID       string
		Artifact struct {
			URI string
		}
		Release struct {
			Env map[string]string
		}
	}
	if err := json.Unmarshal(manifest, &manifestSteps); err != nil {
		return fmt.Errorf("error decoding manifest json: %s", err)
	}

	artifactURIs := make(map[string]string)
	for _, step := range manifestSteps {
		if step.Artifact.URI != "" {
			artifactURIs[step.ID] = step.Artifact.URI
			if step.ID == "gitreceive" {
				artifactURIs["slugbuilder"] = step.Release.Env["SLUGBUILDER_IMAGE_URI"]
				artifactURIs["slugrunner"] = step.Release.Env["SLUGRUNNER_IMAGE_URI"]
			}
			// update current artifact in database for service
			sqlBuf.WriteString(fmt.Sprintf(`
UPDATE artifacts SET uri = '%s' WHERE artifact_id = (SELECT artifact_id FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = '%s'));`, step.Artifact.URI, step.ID))
		}
	}

	data.Discoverd.Artifact.URI = artifactURIs["discoverd"]
	data.Discoverd.Release.Env["DISCOVERD_PEERS"] = "{{ range $ip := .SortedHostIPs }}{{ $ip }}:1111,{{ end }}"
	data.Postgres.Artifact.URI = artifactURIs["postgres"]
	data.Flannel.Artifact.URI = artifactURIs["flannel"]
	data.Controller.Artifact.URI = artifactURIs["controller"]

	for _, app := range []string{"gitreceive", "taffy"} {
		for _, env := range []string{"slugbuilder", "slugrunner"} {
			sqlBuf.WriteString(fmt.Sprintf(`
UPDATE releases SET env = pg_temp.json_object_update_key(env, '%s_IMAGE_URI', '%s') WHERE release_id = (SELECT release_id from apps WHERE name = '%s');`, strings.ToUpper(env), artifactURIs[env], app))
		}
	}

	step := func(id, name string, action bootstrap.Action) bootstrap.Step {
		if ra, ok := action.(*bootstrap.RunAppAction); ok {
			ra.ID = id
		}
		return bootstrap.Step{
			StepMeta: bootstrap.StepMeta{ID: id, Action: name},
			Action:   action,
		}
	}

	// start discoverd/flannel/postgres
	steps := bootstrap.Manifest{
		step("discoverd", "run-app", &bootstrap.RunAppAction{
			ExpandedFormation: data.Discoverd,
		}),
		step("flannel", "run-app", &bootstrap.RunAppAction{
			ExpandedFormation: data.Flannel,
		}),
		step("wait-hosts", "wait-hosts", &bootstrap.WaitHostsAction{}),
		step("postgres", "run-app", &bootstrap.RunAppAction{
			ExpandedFormation: data.Postgres,
		}),
		step("postgres-wait", "wait", &bootstrap.WaitAction{
			URL: "http://postgres-api.discoverd/ping",
		}),
	}
	state, err := steps.Run(ch, clusterURL, hostIPs, minHosts, timeout)
	if err != nil {
		return err
	}

	// set DISCOVERD_PEERS in release
	sqlBuf.WriteString(fmt.Sprintf(`
UPDATE releases SET env = pg_temp.json_object_update_key(env, 'DISCOVERD_PEERS', '%s') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'discoverd')
`, state.StepData["discoverd"].(*bootstrap.RunAppState).Release.Env["DISCOVERD_PEERS"]))

	// load data into postgres
	cmd := exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.Postgres.Artifact.Type, URI: data.Postgres.Artifact.URI}, nil)
	cmd.Entrypoint = []string{"psql"}
	cmd.Env = map[string]string{
		"PGHOST":     "leader.postgres.discoverd",
		"PGUSER":     "******",
		"PGDATABASE": "postgres",
		"PGPASSWORD": data.Postgres.Release.Env["PGPASSWORD"],
	}
	cmd.Stdin = db
	meta := bootstrap.StepMeta{ID: "restore", Action: "restore-db"}
	ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()}
	out, err := cmd.CombinedOutput()
	if os.Getenv("DEBUG") != "" {
		fmt.Println(string(out))
	}
	if err != nil {
		ch <- &bootstrap.StepInfo{
			StepMeta:  meta,
			State:     "error",
			Error:     fmt.Sprintf("error running psql restore: %s - %q", err, string(out)),
			Err:       err,
			Timestamp: time.Now().UTC(),
		}
		return err
	}
	ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()}

	// start controller/scheduler
	data.Controller.Processes["web"] = 1
	delete(data.Controller.Processes, "worker")
	meta = bootstrap.StepMeta{ID: "controller", Action: "run-app"}
	ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()}
	if err := (&bootstrap.RunAppAction{
		ID:                "controller",
		ExpandedFormation: data.Controller,
	}).Run(state); err != nil {
		ch <- &bootstrap.StepInfo{
			StepMeta:  meta,
			State:     "error",
			Error:     err.Error(),
			Err:       err,
			Timestamp: time.Now().UTC(),
		}
		return err
	}
	ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()}

	return nil
}
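// Each variant of runBootstrapBackup appends its rewrite SQL to a bytes.Buffer
// placed behind the gzipped dump via io.MultiReader. Statements written to the
// buffer after the MultiReader is constructed are still executed, because the
// buffer is only consumed once the dump reader is exhausted. A small,
// stdlib-only demonstration of that property:
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	dump := strings.NewReader("-- restored dump\n")
	sqlBuf := &bytes.Buffer{}
	restore := io.MultiReader(dump, sqlBuf)

	// Appended after the combined reader was built, exactly like the UPDATE
	// statements appended throughout runBootstrapBackup.
	sqlBuf.WriteString("UPDATE releases SET env = env; -- rewrite step\n")

	out, _ := ioutil.ReadAll(restore)
	fmt.Print(string(out))
	// Output:
	// -- restored dump
	// UPDATE releases SET env = env; -- rewrite step
}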
func runBootstrapBackup(manifest []byte, backupFile string, ch chan *bootstrap.StepInfo, cfg bootstrap.Config) error { defer close(ch) f, err := os.Open(backupFile) if err != nil { return fmt.Errorf("error opening backup file: %s", err) } defer f.Close() tr := tar.NewReader(f) getFile := func(name string) (io.Reader, error) { rewound := false var res io.Reader for { header, err := tr.Next() if err == io.EOF && !rewound { if _, err := f.Seek(0, os.SEEK_SET); err != nil { return nil, fmt.Errorf("error seeking in backup file: %s", err) } rewound = true tr = tar.NewReader(f) continue } else if err != nil { return nil, fmt.Errorf("error finding %s in backup file: %s", name, err) } if path.Base(header.Name) != name { continue } if strings.HasSuffix(name, ".gz") { res, err = gzip.NewReader(tr) if err != nil { return nil, fmt.Errorf("error opening %s from backup file: %s", name, err) } } else { res = tr } break } return res, nil } var data struct { Discoverd, Flannel, Postgres, MariaDB, Controller *ct.ExpandedFormation } jsonData, err := getFile("flynn.json") if err != nil { return err } if jsonData == nil { return fmt.Errorf("did not file flynn.json in backup file") } if err := json.NewDecoder(jsonData).Decode(&data); err != nil { return fmt.Errorf("error decoding backup data: %s", err) } db, err := getFile("postgres.sql.gz") if err != nil { return err } if db == nil { return fmt.Errorf("did not find postgres.sql.gz in backup file") } // add buffer to the end of the SQL import containing commands that rewrite data in the controller db sqlBuf := &bytes.Buffer{} db = io.MultiReader(db, sqlBuf) sqlBuf.WriteString(fmt.Sprintf("\\connect %s\n", data.Controller.Release.Env["PGDATABASE"])) sqlBuf.WriteString(` CREATE FUNCTION pg_temp.json_object_update_key( "json" jsonb, "key_to_set" TEXT, "value_to_set" TEXT ) RETURNS jsonb LANGUAGE sql IMMUTABLE STRICT AS $function$ SELECT ('{' || string_agg(to_json("key") || ':' || "value", ',') || '}')::jsonb FROM (SELECT * FROM json_each("json"::json) WHERE "key" <> "key_to_set" UNION ALL SELECT "key_to_set", to_json("value_to_set")) AS "fields" $function$; `) var manifestSteps []struct { ID string Artifact struct { URI string } Release struct { Env map[string]string } } if err := json.Unmarshal(manifest, &manifestSteps); err != nil { return fmt.Errorf("error decoding manifest json: %s", err) } artifactURIs := make(map[string]string) for _, step := range manifestSteps { if step.Artifact.URI != "" { artifactURIs[step.ID] = step.Artifact.URI if step.ID == "gitreceive" { artifactURIs["slugbuilder"] = step.Release.Env["SLUGBUILDER_IMAGE_URI"] artifactURIs["slugrunner"] = step.Release.Env["SLUGRUNNER_IMAGE_URI"] } // update current artifact in database for service sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%s' WHERE artifact_id = (SELECT artifact_id FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = '%s'));`, step.Artifact.URI, step.ID)) } } data.Discoverd.Artifact.URI = artifactURIs["discoverd"] data.Discoverd.Release.Env["DISCOVERD_PEERS"] = "{{ range $ip := .SortedHostIPs }}{{ $ip }}:1111,{{ end }}" data.Postgres.Artifact.URI = artifactURIs["postgres"] data.Flannel.Artifact.URI = artifactURIs["flannel"] data.Controller.Artifact.URI = artifactURIs["controller"] if data.MariaDB != nil { data.MariaDB.Artifact.URI = artifactURIs["mariadb"] if data.MariaDB.Processes["mariadb"] == 0 { // skip mariadb if it wasn't scaled up in the backup data.MariaDB = nil } } sqlBuf.WriteString(fmt.Sprintf(` UPDATE artifacts SET uri = '%s' WHERE 
uri = (SELECT env->>'SLUGRUNNER_IMAGE_URI' FROM releases WHERE release_id = (SELECT release_id FROM apps WHERE name = 'gitreceive'));`, artifactURIs["slugrunner"])) for _, app := range []string{"gitreceive", "taffy"} { for _, env := range []string{"slugbuilder", "slugrunner"} { sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, '%s_IMAGE_URI', '%s') WHERE release_id = (SELECT release_id from apps WHERE name = '%s');`, strings.ToUpper(env), artifactURIs[env], app)) } } step := func(id, name string, action bootstrap.Action) bootstrap.Step { if ra, ok := action.(*bootstrap.RunAppAction); ok { ra.ID = id } return bootstrap.Step{ StepMeta: bootstrap.StepMeta{ID: id, Action: name}, Action: action, } } // start discoverd/flannel/postgres/mariadb cfg.Singleton = data.Postgres.Release.Env["SINGLETON"] == "true" systemSteps := bootstrap.Manifest{ step("discoverd", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Discoverd, }), step("flannel", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Flannel, }), step("wait-hosts", "wait-hosts", &bootstrap.WaitHostsAction{}), step("postgres", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Postgres, }), step("postgres-wait", "wait", &bootstrap.WaitAction{ URL: "http://postgres-api.discoverd/ping", }), } // Only run up MariaDB if it's in the backup if data.MariaDB != nil { systemSteps = append(systemSteps, step("mariadb", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.MariaDB, })) systemSteps = append(systemSteps, step("mariadb-wait", "wait", &bootstrap.WaitAction{ URL: "http://mariadb-api.discoverd/ping", })) } state, err := systemSteps.Run(ch, cfg) if err != nil { return err } // set DISCOVERD_PEERS in release sqlBuf.WriteString(fmt.Sprintf(` UPDATE releases SET env = pg_temp.json_object_update_key(env, 'DISCOVERD_PEERS', '%s') WHERE release_id = (SELECT release_id FROM apps WHERE name = 'discoverd') `, state.StepData["discoverd"].(*bootstrap.RunAppState).Release.Env["DISCOVERD_PEERS"])) // load data into postgres cmd := exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.Postgres.Artifact.Type, URI: data.Postgres.Artifact.URI}, nil) cmd.Entrypoint = []string{"psql"} cmd.Env = map[string]string{ "PGHOST": "leader.postgres.discoverd", "PGUSER": "******", "PGDATABASE": "postgres", "PGPASSWORD": data.Postgres.Release.Env["PGPASSWORD"], } cmd.Stdin = db meta := bootstrap.StepMeta{ID: "restore", Action: "restore-postgres"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err := cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running psql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} var mysqldb io.Reader if data.MariaDB != nil { mysqldb, err = getFile("mysql.sql.gz") if err != nil { return err } } // load data into mariadb if it was present in the backup. 
if mysqldb != nil && data.MariaDB != nil { cmd = exec.JobUsingHost(state.Hosts[0], host.Artifact{Type: data.MariaDB.Artifact.Type, URI: data.MariaDB.Artifact.URI}, nil) cmd.Entrypoint = []string{"mysql"} cmd.Cmd = []string{"-u", "flynn", "-h", "leader.mariadb.discoverd"} cmd.Env = map[string]string{ "MYSQL_PWD": data.MariaDB.Release.Env["MYSQL_PWD"], } cmd.Stdin = mysqldb meta = bootstrap.StepMeta{ID: "restore", Action: "restore-mariadb"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} out, err = cmd.CombinedOutput() if os.Getenv("DEBUG") != "" { fmt.Println(string(out)) } if err != nil { ch <- &bootstrap.StepInfo{ StepMeta: meta, State: "error", Error: fmt.Sprintf("error running mysql restore: %s - %q", err, string(out)), Err: err, Timestamp: time.Now().UTC(), } return err } ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} } // start controller API data.Controller.Processes = map[string]int{"web": 1} _, err = bootstrap.Manifest{ step("controller", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), }.RunWithState(ch, state) // wait for controller to come up meta = bootstrap.StepMeta{ID: "wait-controller", Action: "wait-controller"} ch <- &bootstrap.StepInfo{StepMeta: meta, State: "start", Timestamp: time.Now().UTC()} controllerInstances, err := discoverd.GetInstances("controller", 30*time.Second) if err != nil { return fmt.Errorf("error getting controller instance: %s", err) } // get blobstore config client, err := controller.NewClient("http://"+controllerInstances[0].Addr, data.Controller.Release.Env["AUTH_KEY"]) if err != nil { return err } blobstoreRelease, err := client.GetAppRelease("blobstore") if err != nil { return fmt.Errorf("error getting blobstore release: %s", err) } blobstoreFormation, err := client.GetExpandedFormation("blobstore", blobstoreRelease.ID) if err != nil { return fmt.Errorf("error getting blobstore expanded formation: %s", err) } state.SetControllerKey(data.Controller.Release.Env["AUTH_KEY"]) ch <- &bootstrap.StepInfo{StepMeta: meta, State: "done", Timestamp: time.Now().UTC()} // start blobstore, scheduler, and enable cluster monitor data.Controller.Processes = map[string]int{"scheduler": 1} // only start one scheduler instance schedulerProcess := data.Controller.Release.Processes["scheduler"] schedulerProcess.Omni = false data.Controller.Release.Processes["scheduler"] = schedulerProcess _, err = bootstrap.Manifest{ step("blobstore", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: blobstoreFormation, }), step("blobstore-wait", "wait", &bootstrap.WaitAction{ URL: "http://blobstore.discoverd", Status: 404, }), step("controller-scheduler", "run-app", &bootstrap.RunAppAction{ ExpandedFormation: data.Controller, }), step("status", "status-check", &bootstrap.StatusCheckAction{ URL: "http://status-web.discoverd", }), step("cluster-monitor", "cluster-monitor", &bootstrap.ClusterMonitorAction{ Enabled: true, }), }.RunWithState(ch, state) if err != nil { return err } return nil }
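// The restore code only matches archive entries by base name: flynn.json plus
// gzipped dumps such as postgres.sql.gz, mysql.sql.gz and mongodb.archive.gz.
// The program below is a hypothetical helper for producing a minimally shaped
// archive when exercising the restore path; the entry contents are
// placeholders, not a real backup.
package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"os"
)

func writeEntry(tw *tar.Writer, name string, body []byte) error {
	if err := tw.WriteHeader(&tar.Header{Name: name, Mode: 0644, Size: int64(len(body))}); err != nil {
		return err
	}
	_, err := tw.Write(body)
	return err
}

func main() {
	f, err := os.Create("backup.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	tw := tar.NewWriter(f)
	defer tw.Close()

	// Formation data decoded into the `data` struct by runBootstrapBackup.
	if err := writeEntry(tw, "backup/flynn.json", []byte(`{}`)); err != nil {
		panic(err)
	}

	// The SQL dump is stored gzipped; entries ending in .gz are wrapped in a
	// gzip.Reader when read back.
	var gz bytes.Buffer
	zw := gzip.NewWriter(&gz)
	zw.Write([]byte("-- postgres dump placeholder\n"))
	zw.Close()
	if err := writeEntry(tw, "backup/postgres.sql.gz", gz.Bytes()); err != nil {
		panic(err)
	}
}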