func (c *controllerAPI) PutFormation(ctx context.Context, w http.ResponseWriter, req *http.Request) {
	app := c.getApp(ctx)
	release, err := c.getRelease(ctx)
	if err != nil {
		respondWithError(w, err)
		return
	}

	var formation ct.Formation
	if err = httphelper.DecodeJSON(req, &formation); err != nil {
		respondWithError(w, err)
		return
	}

	if release.ArtifactID == "" {
		respondWithError(w, ct.ValidationError{Message: "release is not deployable"})
		return
	}

	formation.AppID = app.ID
	formation.ReleaseID = release.ID
	if err = schema.Validate(formation); err != nil {
		respondWithError(w, err)
		return
	}

	if err = c.formationRepo.Add(&formation); err != nil {
		respondWithError(w, err)
		return
	}
	httphelper.JSON(w, 200, &formation)
}
func (s *S) createTestFormation(c *C, formation *ct.Formation) *ct.Formation {
	path := formationPath(formation.AppID, formation.ReleaseID)
	formation.AppID = ""
	formation.ReleaseID = ""
	out := &ct.Formation{}
	res, err := s.Put(path, formation, out)
	c.Assert(err, IsNil)
	c.Assert(res.StatusCode, Equals, 200)
	return out
}
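// For context, a hypothetical test using the helper above might look like the
// sketch below. The createTestApp and createTestRelease helpers and the "web"
// process count are assumptions made for illustration, not part of the
// original suite.
func (s *S) TestPutFormationExample(c *C) {
	app := s.createTestApp(c, &ct.App{Name: "formation-test"}) // assumed helper
	release := s.createTestRelease(c, &ct.Release{})           // assumed helper
	formation := s.createTestFormation(c, &ct.Formation{
		AppID:     app.ID,
		ReleaseID: release.ID,
		Processes: map[string]int{"web": 1},
	})
	c.Assert(formation.AppID, Equals, app.ID)
	c.Assert(formation.ReleaseID, Equals, release.ID)
	c.Assert(formation.Processes["web"], Equals, 1)
}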
func putFormation(formation ct.Formation, app *ct.App, release *ct.Release, repo *FormationRepo, r ResponseHelper) {
	formation.AppID = app.ID
	formation.ReleaseID = release.ID

	if app.Protected {
		for typ := range release.Processes {
			if formation.Processes[typ] == 0 {
				r.Error(ct.ValidationError{Message: "unable to scale to zero, app is protected"})
				return
			}
		}
	}

	if err := repo.Add(&formation); err != nil {
		r.Error(err)
		return
	}
	r.JSON(200, &formation)
}
func putFormation(formation ct.Formation, app *ct.App, release *ct.Release, repo *FormationRepo, r ResponseHelper) {
	formation.AppID = app.ID
	formation.ReleaseID = release.ID
	err := repo.Add(&formation)

	if app.Protected {
		for typ := range release.Processes {
			if formation.Processes[typ] == 0 {
				r.JSON(400, struct{}{})
				return
			}
		}
	}

	if err != nil {
		r.Error(err)
		return
	}
	r.JSON(200, &formation)
}
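// For reference, a minimal client-side sketch of exercising the formation
// endpoint handled above, using the controller client the same way runImport
// does below. The appID/releaseID parameters and the "web": 2 scale are
// placeholders, not values from the original code.
func scaleWebExample(client controller.Client, appID, releaseID string) error {
	return client.PutFormation(&ct.Formation{
		AppID:     appID,
		ReleaseID: releaseID,
		Processes: map[string]int{"web": 2}, // placeholder process counts
	})
}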
func runImport(args *docopt.Args, client controller.Client) error {
	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app           *ct.App
		release       *ct.Release
		imageArtifact *ct.Artifact
		formation     *ct.Formation
		routes        []router.Route
		slug          io.Reader
		dockerImage   struct {
			config struct {
				Tag string `json:"tag"`
			}
			archive io.Reader
		}
		pgDump     io.Reader
		mysqlDump  io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		switch path.Base(header.Name) {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = nil
		case "artifact.json":
			imageArtifact = &ct.Artifact{}
			if err := json.NewDecoder(tr).Decode(imageArtifact); err != nil {
				return fmt.Errorf("error decoding image artifact: %s", err)
			}
			imageArtifact.ID = ""
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			for _, route := range routes {
				route.ID = ""
				route.ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			slug = f
			uploadSize += header.Size
		case "docker-image.json":
			if err := json.NewDecoder(tr).Decode(&dockerImage.config); err != nil {
				return fmt.Errorf("error decoding docker image json: %s", err)
			}
		case "docker-image.tar":
			f, err := ioutil.TempFile("", "docker-image.tar")
			if err != nil {
				return fmt.Errorf("error creating docker image tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading docker image: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking docker image tempfile: %s", err)
			}
			dockerImage.archive = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		case "mysql.dump":
			f, err := ioutil.TempFile("", "mysql.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			mysqlDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if mysqlDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "mysql",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning mysql resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getMysqlRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting mysql config: %s", err)
		}
		config.Stdin = mysqlDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := mysqlRestore(client, config); err != nil {
			return fmt.Errorf("error restoring mysql database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	uploadSlug := release != nil && imageArtifact != nil && slug != nil

	if uploadSlug {
		// Use current slugrunner as the artifact
		gitreceiveRelease, err := client.GetAppRelease("gitreceive")
		if err != nil {
			return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
		}
		if id, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]; ok {
			imageArtifact, err = client.GetArtifact(id)
			if err != nil {
				return fmt.Errorf("unable to get slugrunner image artifact: %s", err)
			}
		} else if uri, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"]; ok {
			imageArtifact = &ct.Artifact{
				Type: host.ArtifactTypeDocker,
				URI:  uri,
			}
		} else {
			return fmt.Errorf("gitreceive env missing slug runner image")
		}
	}

	if dockerImage.config.Tag != "" && dockerImage.archive != nil {
		// load the docker image into the Docker daemon
		cmd := exec.Command("docker", "load")
		cmd.Stdin = dockerImage.archive
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("error running docker load: %s: %q", err, out)
		}

		// use the tag from the config (which will now be applied to
		// the loaded image) to push the image to docker-receive
		cluster, err := getCluster()
		if err != nil {
			return err
		}
		host, err := cluster.DockerPushHost()
		if err != nil {
			return err
		}
		tag := fmt.Sprintf("%s/%s:latest", host, app.Name)
		if out, err := exec.Command("docker", "tag", "--force", dockerImage.config.Tag, tag).CombinedOutput(); err != nil {
			return fmt.Errorf("error tagging docker image: %s: %q", err, out)
		}

		artifact, err := dockerPush(client, app.Name, tag)
		if err != nil {
			return fmt.Errorf("error pushing docker image: %s", err)
		}
		release.ArtifactIDs = []string{artifact.ID}
	} else if imageArtifact != nil {
		if imageArtifact.ID == "" {
			if err := client.CreateArtifact(imageArtifact); err != nil {
				return fmt.Errorf("error creating image artifact: %s", err)
			}
		}
		release.ArtifactIDs = []string{imageArtifact.ID}
	}

	if release != nil {
		for t, proc := range release.Processes {
			for i, port := range proc.Ports {
				if port.Service != nil && strings.HasPrefix(port.Service.Name, oldName) {
					proc.Ports[i].Service.Name = strings.Replace(port.Service.Name, oldName, app.Name, 1)
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if uploadSlug {
		slugURI := fmt.Sprintf("http://blobstore.discoverd/%s/slug.tgz", random.UUID())
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", slugURI},
			Stdin:      slug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
		slugArtifact := &ct.Artifact{
			Type: host.ArtifactTypeFile,
			URI:  slugURI,
		}
		if err := client.CreateArtifact(slugArtifact); err != nil {
			return fmt.Errorf("error creating slug artifact: %s", err)
		}
		release.ID = ""
		release.ArtifactIDs = append(release.ArtifactIDs, slugArtifact.ID)
		if release.Meta == nil {
			release.Meta = make(map[string]string, 1)
		}
		release.Meta["git"] = "true"
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)
	return nil
}
func runImport(args *docopt.Args, client *controller.Client) error {
	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app        *ct.App
		release    *ct.Release
		artifact   *ct.Artifact
		formation  *ct.Formation
		routes     []router.Route
		slug       io.Reader
		pgDump     io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		switch path.Base(header.Name) {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactID = ""
		case "artifact.json":
			artifact = &ct.Artifact{}
			if err := json.NewDecoder(tr).Decode(artifact); err != nil {
				return fmt.Errorf("error decoding artifact: %s", err)
			}
			artifact.ID = ""
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			for _, route := range routes {
				route.ID = ""
				route.ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			slug = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	uploadSlug := release != nil && release.Env["SLUG_URL"] != "" && artifact != nil && slug != nil

	if uploadSlug {
		// Use current slugrunner as the artifact
		gitreceiveRelease, err := client.GetAppRelease("gitreceive")
		if err != nil {
			return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
		}
		artifact = &ct.Artifact{
			Type: "docker",
			URI:  gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"],
		}
		if artifact.URI == "" {
			return fmt.Errorf("gitreceive env missing SLUGRUNNER_IMAGE_URI")
		}
		release.Env["SLUG_URL"] = fmt.Sprintf("http://blobstore.discoverd/%s.tgz", random.UUID())
	}

	if artifact != nil {
		if err := client.CreateArtifact(artifact); err != nil {
			return fmt.Errorf("error creating artifact: %s", err)
		}
		release.ArtifactID = artifact.ID
	}

	if release != nil {
		for t, proc := range release.Processes {
			for i, port := range proc.Ports {
				if port.Service != nil && port.Service.Name == oldName+"-web" {
					proc.Ports[i].Service.Name = app.Name + "-web"
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if uploadSlug {
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Entrypoint: []string{"curl"},
			Args:       []string{"--request", "PUT", "--upload-file", "-", release.Env["SLUG_URL"]},
			Stdin:      slug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)
	return nil
}
func runImport(args *docopt.Args, client controller.Client) error {
	jobs, err := strconv.Atoi(args.String["--jobs"])
	if err != nil {
		return err
	}

	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app         *ct.App
		release     *ct.Release
		artifacts   []*ct.Artifact
		formation   *ct.Formation
		routes      []router.Route
		legacySlug  io.Reader
		dockerImage struct {
			config struct {
				Tag string `json:"tag"`
			}
			archive io.Reader
		}
		pgDump     io.Reader
		mysqlDump  io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1
	layers := make(map[string]io.Reader)

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		filename := path.Base(header.Name)
		if strings.HasSuffix(filename, ".layer") {
			f, err := ioutil.TempFile("", "flynn-layer-")
			if err != nil {
				return fmt.Errorf("error creating layer tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading %s: %s", header.Name, err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking layer tempfile: %s", err)
			}
			layers[strings.TrimSuffix(filename, ".layer")] = f
			uploadSize += header.Size
			continue
		}

		switch filename {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = nil
		case "artifacts.json":
			if err := json.NewDecoder(tr).Decode(&artifacts); err != nil {
				return fmt.Errorf("error decoding artifacts: %s", err)
			}
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			for _, route := range routes {
				route.ID = ""
				route.ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			legacySlug = f
			uploadSize += header.Size
		case "docker-image.json":
			if err := json.NewDecoder(tr).Decode(&dockerImage.config); err != nil {
				return fmt.Errorf("error decoding docker image json: %s", err)
			}
		case "docker-image.tar":
			f, err := ioutil.TempFile("", "docker-image.tar")
			if err != nil {
				return fmt.Errorf("error creating docker image tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading docker image: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking docker image tempfile: %s", err)
			}
			dockerImage.archive = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		case "mysql.dump":
			f, err := ioutil.TempFile("", "mysql.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			mysqlDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config, jobs); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if mysqlDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "mysql",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning mysql resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getMysqlRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting mysql config: %s", err)
		}
		config.Stdin = mysqlDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := mysqlRestore(client, config); err != nil {
			return fmt.Errorf("error restoring mysql database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	var uploadLegacySlug bool

	if legacySlug != nil {
		if err := func() error {
			gitreceiveRelease, err := client.GetAppRelease("gitreceive")
			if err != nil {
				return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
			}

			// handle legacy clusters which reference Docker image URIs
			if uri, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"]; ok {
				artifact := &ct.Artifact{
					Type: ct.DeprecatedArtifactTypeDocker,
					URI:  uri,
				}
				if err := client.CreateArtifact(artifact); err != nil {
					return fmt.Errorf("error creating image artifact: %s", err)
				}
				uploadLegacySlug = true
				release.ArtifactIDs = []string{artifact.ID}
				return nil
			}

			slugBuilderID, ok := gitreceiveRelease.Env["SLUGBUILDER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugbuilder image")
			}
			slugRunnerID, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugrunner image")
			}

			// handle legacy tarball based slugbuilders (which are Docker based)
			slugBuilderImage, err := client.GetArtifact(slugBuilderID)
			if err != nil {
				return fmt.Errorf("unable to get slugbuilder image artifact: %s", err)
			}
			if slugBuilderImage.Type == ct.DeprecatedArtifactTypeDocker {
				uploadLegacySlug = true
				release.ArtifactIDs = []string{slugRunnerID}
				return nil
			}

			// Use slugbuilder to convert the legacy slug to a
			// Flynn squashfs image
			slugImageID := random.UUID()
			config := runConfig{
				App:        app.ID,
				Release:    gitreceiveRelease.ID,
				ReleaseEnv: true,
				Artifacts:  []string{slugBuilderID},
				DisableLog: true,
				Args:       []string{"/bin/convert-legacy-slug.sh"},
				Stdin:      legacySlug,
				Stdout:     ioutil.Discard,
				Stderr:     ioutil.Discard,
				Env:        map[string]string{"SLUG_IMAGE_ID": slugImageID},
			}
			if bar != nil {
				config.Stdin = bar.NewProxyReader(config.Stdin)
			}
			if err := runJob(client, config); err != nil {
				return fmt.Errorf("error uploading slug: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = []string{slugRunnerID, slugImageID}
			if release.Meta == nil {
				release.Meta = make(map[string]string, 1)
			}
			release.Meta["git"] = "true"
			return nil
		}(); err != nil {
			return err
		}
	} else if dockerImage.config.Tag != "" && dockerImage.archive != nil {
		// load the docker image into the Docker daemon
		cmd := exec.Command("docker", "load")
		cmd.Stdin = dockerImage.archive
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("error running docker load: %s: %q", err, out)
		}

		// use the tag from the config (which will now be applied to
		// the loaded image) to push the image to docker-receive
		cluster, err := getCluster()
		if err != nil {
			return err
		}
		host, err := cluster.DockerPushHost()
		if err != nil {
			return err
		}
		tag := fmt.Sprintf("%s/%s:flynn-import-%s", host, app.Name, random.String(8))
		if out, err := exec.Command("docker", "tag", dockerImage.config.Tag, tag).CombinedOutput(); err != nil {
			return fmt.Errorf("error tagging docker image: %s: %q", err, out)
		}

		artifact, err := dockerPush(client, app.Name, tag)
		if err != nil {
			return fmt.Errorf("error pushing docker image: %s", err)
		}
		release.ArtifactIDs = []string{artifact.ID}
	} else if len(artifacts) > 0 {
		// import blobstore Flynn artifacts
		blobstoreRelease, err := client.GetAppRelease("blobstore")
		if err != nil {
			return fmt.Errorf("unable to retrieve blobstore release: %s", err)
		}

		upload := func(id, url string) error {
			layer, ok := layers[id]
			if !ok {
				return fmt.Errorf("missing layer in export: %s", id)
			}
			config := runConfig{
				App:        app.ID,
				Release:    blobstoreRelease.ID,
				DisableLog: true,
				Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", url},
				Stdin:      layer,
				Stdout:     ioutil.Discard,
				Stderr:     ioutil.Discard,
			}
			if bar != nil {
				config.Stdin = bar.NewProxyReader(config.Stdin)
			}
			if err := runJob(client, config); err != nil {
				return fmt.Errorf("error uploading layer: %s", err)
			}
			return nil
		}

		release.ArtifactIDs = make([]string, len(artifacts))
		for i, artifact := range artifacts {
			if artifact.Type != ct.ArtifactTypeFlynn {
				continue
			}
			if !artifact.Blobstore() {
				continue
			}
			for _, rootfs := range artifact.Manifest().Rootfs {
				for _, layer := range rootfs.Layers {
					if err := upload(layer.ID, artifact.LayerURL(layer)); err != nil {
						return err
					}
				}
			}
			artifact.ID = ""
			if err := client.CreateArtifact(artifact); err != nil {
				return fmt.Errorf("error creating artifact: %s", err)
			}
			release.ArtifactIDs[i] = artifact.ID
		}

		// use the current slugrunner image for slug releases
		if release.IsGitDeploy() {
			gitreceiveRelease, err := client.GetAppRelease("gitreceive")
			if err != nil {
				return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
			}
			slugRunnerID, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugrunner image")
			}
			release.ArtifactIDs[0] = slugRunnerID
		}
	}

	if release != nil {
		for t, proc := range release.Processes {
			// update legacy slug releases to use Args rather than the
			// deprecated Entrypoint and Cmd fields
			if release.IsGitDeploy() && len(proc.Args) == 0 {
				proc.Args = append([]string{"/runner/init"}, proc.DeprecatedCmd...)
				proc.DeprecatedCmd = nil
			}
			for i, port := range proc.Ports {
				if port.Service != nil && strings.HasPrefix(port.Service.Name, oldName) {
					proc.Ports[i].Service.Name = strings.Replace(port.Service.Name, oldName, app.Name, 1)
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if uploadLegacySlug {
		slugURI := fmt.Sprintf("http://blobstore.discoverd/%s/slug.tgz", random.UUID())
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", slugURI},
			Stdin:      legacySlug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
		slugArtifact := &ct.Artifact{
			Type: ct.DeprecatedArtifactTypeFile,
			URI:  slugURI,
		}
		if err := client.CreateArtifact(slugArtifact); err != nil {
			return fmt.Errorf("error creating slug artifact: %s", err)
		}
		release.ID = ""
		release.ArtifactIDs = append(release.ArtifactIDs, slugArtifact.ID)
		if release.Meta == nil {
			release.Meta = make(map[string]string, 1)
		}
		release.Meta["git"] = "true"
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)
	return nil
}