Example #1
// actionRunner handles running an action which may take a while to complete
// providing progress bars and signal handling.
func actionRunner(cmd *cli.Cmd, action action) func() {
	cmd.Spec = "[--silent] [--no-progress] " + cmd.Spec
	silent := cmd.BoolOpt("silent", false, "Set to true to disable all non-error output")
	noProgress := cmd.BoolOpt("no-progress", false, "Set to true to disable the progress bar")

	return func() {
		var infoWriter io.Writer = os.Stderr
		var ticker <-chan time.Time

		if err := action.init(); err != nil {
			fail("Initialization failed: %v", err)
		}

		done, err := action.start(infoWriter)
		if err != nil {
			fail("Startup failed: %v", err)
		}

		var bar *pb.ProgressBar
		if !*silent && !*noProgress {
			ticker = time.Tick(statsFrequency)
			bar = action.newProgressBar()
			if bar != nil {
				bar.Output = os.Stderr
				bar.ShowSpeed = true
				bar.ManualUpdate = true
				bar.SetMaxWidth(78)
				bar.Start()
				bar.Update()
			}
		}
		if *silent {
			infoWriter = ioutil.Discard
		}

		sigchan := make(chan os.Signal, 1)
		// SIGKILL cannot be caught or handled, so only SIGTERM and SIGINT are registered.
		signal.Notify(sigchan, syscall.SIGTERM, syscall.SIGINT)

	LOOP:
		for {
			select {
			case <-ticker:
				action.updateProgress(bar)
				if bar != nil {
					bar.Update()
				}

			case <-sigchan:
				if bar != nil {
					bar.Finish()
				}
				fmt.Fprintf(os.Stderr, "\nAborting...")
				action.abort()
				<-done
				fmt.Fprintf(os.Stderr, "Aborted.\n")
				break LOOP

			case err := <-done:
				if err != nil {
					fail("Processing failed: %v", err)
				}
				break LOOP
			}
		}
		if bar != nil {
			bar.Finish()
		}

		if !*silent {
			action.printFinalStats(infoWriter)
		}
	}
}
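A minimal, self-contained sketch of the manual-update pattern above, where the bar redraws only when Update is called, driven by a ticker. It assumes the gopkg.in/cheggaaa/pb.v1 import path; the work loop and totals are purely illustrative.

package main

import (
	"time"

	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	total := 100
	bar := pb.New(total)
	bar.ManualUpdate = true // the bar only redraws when Update is called
	bar.SetMaxWidth(78)
	bar.Start()

	ticker := time.Tick(100 * time.Millisecond)
	for done := 0; done < total; done++ {
		time.Sleep(20 * time.Millisecond) // stand-in for a unit of work
		bar.Increment()
		select {
		case <-ticker:
			bar.Update() // redraw at most once per tick
		default:
		}
	}
	bar.Update()
	bar.Finish()
}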
Example #2
func customizeBar(bar *pb.ProgressBar) {
	bar.ShowCounters = true
	bar.ShowTimeLeft = false
	bar.ShowSpeed = true
	bar.SetMaxWidth(80)
	bar.SetUnits(pb.U_BYTES)
}
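A hypothetical call site for a helper like customizeBar, assuming it lives in the same package and the gopkg.in/cheggaaa/pb.v1 import path; the in-memory payload stands in for real data.

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"

	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	payload := bytes.Repeat([]byte("x"), 4<<20) // 4 MiB of sample bytes

	bar := pb.New(len(payload))
	customizeBar(bar) // apply the shared display settings from the example above
	bar.Start()
	defer bar.Finish()

	// Every read through the proxy advances the bar.
	src := bar.NewProxyReader(bytes.NewReader(payload))
	if _, err := io.Copy(ioutil.Discard, src); err != nil {
		log.Fatal(err)
	}
}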
Example #3
func runClusterBackup(args *docopt.Args) error {
	client, err := getClusterClient()
	if err != nil {
		return err
	}

	var bar *pb.ProgressBar
	var progress backup.ProgressBar
	if term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.ShowBar = false
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		progress = bar
	}

	var dest io.Writer = os.Stdout
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Create(filename)
		if err != nil {
			return err
		}
		defer f.Close()
		dest = f
	}

	fmt.Fprintln(os.Stderr, "Creating cluster backup...")

	if err := backup.Run(client, dest, progress); err != nil {
		return err
	}
	if bar != nil {
		bar.Finish()
	}
	fmt.Fprintln(os.Stderr, "Backup complete.")

	return nil
}
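A stripped-down sketch of the same guard, drawing progress only when stderr is a terminal so piped output stays clean. It assumes gopkg.in/cheggaaa/pb.v1 and golang.org/x/term for the TTY check (a different package than the example's term helper); the copy itself is illustrative.

package main

import (
	"io"
	"os"

	"golang.org/x/term"
	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	// Write the real payload to stdout; draw progress on stderr only when it is a TTY.
	var dest io.Writer = os.Stdout
	var bar *pb.ProgressBar
	if term.IsTerminal(int(os.Stderr.Fd())) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.ShowBar = false
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		dest = io.MultiWriter(os.Stdout, bar) // the bar counts bytes as an io.Writer
	}

	if _, err := io.Copy(dest, os.Stdin); err != nil {
		os.Exit(1)
	}
	if bar != nil {
		bar.Finish()
	}
}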
Example #4
// download fetches a file over HTTP or HTTPS, showing a progress bar. The
// destination file is always overwritten.
func download(rawurl string, destinationPath string) {
	tempDestinationPath := destinationPath + ".tmp"

	destination, err := os.Create(tempDestinationPath)
	if err != nil {
		log.Fatalf("Unable to open the destination file: %s", tempDestinationPath)
	}
	defer destination.Close()

	response, err := customGet(rawurl)
	if err != nil {
		log.Fatalf("Unable to open a connection to %s", rawurl)
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		log.Fatalf("Unexpected HTTP response code. Wanted 200 but got %d", response.StatusCode)
	}

	var progressBar *pb.ProgressBar

	contentLength, err := strconv.Atoi(response.Header.Get("Content-Length"))
	if err == nil {
		progressBar = pb.New(contentLength)
	} else {
		progressBar = pb.New(0)
	}
	defer progressBar.Finish()

	progressBar.ShowSpeed = true
	progressBar.SetRefreshRate(time.Millisecond * 1000)
	progressBar.SetUnits(pb.U_BYTES)
	progressBar.Start()

	writer := io.MultiWriter(destination, progressBar)

	if _, err := io.Copy(writer, response.Body); err != nil {
		log.Fatalf("Unable to write to %s: %s", tempDestinationPath, err)
	}
	destination.Close()
	if err := os.Rename(tempDestinationPath, destinationPath); err != nil {
		log.Fatalf("Unable to rename %s to %s: %s", tempDestinationPath, destinationPath, err)
	}
}
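An alternative way to size the bar, sketched under the same assumptions: the parsed http.Response already exposes ContentLength (-1 when unknown), so the Content-Length header does not need to be parsed by hand, and bytes can be counted on the read side with a proxy reader instead of an io.MultiWriter. The URL is a placeholder.

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"

	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	resp, err := http.Get("https://example.com/data.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	bar := pb.New(0)
	if resp.ContentLength > 0 {
		bar = pb.New64(resp.ContentLength)
	}
	bar.SetUnits(pb.U_BYTES)
	bar.ShowSpeed = true
	bar.Start()
	defer bar.Finish()

	// Counting on the read side: every read from the proxy advances the bar.
	if _, err := io.Copy(ioutil.Discard, bar.NewProxyReader(resp.Body)); err != nil {
		log.Fatal(err)
	}
}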
Example #5
// Fetch http file url to destination dest, with or without progress.
func FetchHTTPFile(url string, dest string, progress bool) (err error) {
	gologit.Debugf("Creating file: %s\n", dest)
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()

	var r io.Reader

	gologit.Debugf("Fetching url: %s\n", url)
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned non-200 status: %v", resp.Status)
	}

	msgPrefix := fmt.Sprintf("%s: ", path.Base(dest))
	var bar *pb.ProgressBar
	i, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
	if i > 0 && progress {
		bar = pb.New(i).Prefix(msgPrefix).SetUnits(pb.U_BYTES)
		bar.ShowSpeed = true
		bar.RefreshRate = time.Millisecond * 700
		bar.ShowFinalTime = false
		bar.ShowTimeLeft = false
		bar.Start()
		defer bar.Finish()
		r = bar.NewProxyReader(resp.Body)
	} else {
		r = resp.Body
	}
	_, err = io.Copy(out, r)
	return err
}
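A hypothetical call site, assuming FetchHTTPFile above is in the same package; the URL and destination path are placeholders.

package main

import "log"

func main() {
	if err := FetchHTTPFile("https://example.com/archive.tar.gz", "/tmp/archive.tar.gz", true); err != nil {
		log.Fatalf("fetch failed: %v", err)
	}
}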
Example #6
func runImport(args *docopt.Args, client controller.Client) error {
	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app           *ct.App
		release       *ct.Release
		imageArtifact *ct.Artifact
		formation     *ct.Formation
		routes        []router.Route
		slug          io.Reader
		dockerImage   struct {
			config struct {
				Tag string `json:"tag"`
			}
			archive io.Reader
		}
		pgDump     io.Reader
		mysqlDump  io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		switch path.Base(header.Name) {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = nil
		case "artifact.json":
			imageArtifact = &ct.Artifact{}
			if err := json.NewDecoder(tr).Decode(imageArtifact); err != nil {
				return fmt.Errorf("error decoding image artifact: %s", err)
			}
			imageArtifact.ID = ""
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			// index into the slice so the changes are not lost on loop copies
			for i := range routes {
				routes[i].ID = ""
				routes[i].ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			slug = f
			uploadSize += header.Size
		case "docker-image.json":
			if err := json.NewDecoder(tr).Decode(&dockerImage.config); err != nil {
				return fmt.Errorf("error decoding docker image json: %s", err)
			}
		case "docker-image.tar":
			f, err := ioutil.TempFile("", "docker-image.tar")
			if err != nil {
				return fmt.Errorf("error creating docker image tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading docker image: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking docker image tempfile: %s", err)
			}
			dockerImage.archive = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		case "mysql.dump":
			f, err := ioutil.TempFile("", "mysql.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			mysqlDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if mysqlDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "mysql",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning mysql resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getMysqlRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting mysql config: %s", err)
		}
		config.Stdin = mysqlDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := mysqlRestore(client, config); err != nil {
			return fmt.Errorf("error restoring mysql database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	uploadSlug := release != nil && imageArtifact != nil && slug != nil

	if uploadSlug {
		// Use current slugrunner as the artifact
		gitreceiveRelease, err := client.GetAppRelease("gitreceive")
		if err != nil {
			return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
		}
		if id, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]; ok {
			imageArtifact, err = client.GetArtifact(id)
			if err != nil {
				return fmt.Errorf("unable to get slugrunner image artifact: %s", err)
			}
		} else if uri, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"]; ok {
			imageArtifact = &ct.Artifact{
				Type: host.ArtifactTypeDocker,
				URI:  uri,
			}
		} else {
			return fmt.Errorf("gitreceive env missing slug runner image")
		}
	}

	if dockerImage.config.Tag != "" && dockerImage.archive != nil {
		// load the docker image into the Docker daemon
		cmd := exec.Command("docker", "load")
		cmd.Stdin = dockerImage.archive
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("error running docker load: %s: %q", err, out)
		}

		// use the tag from the config (which will now be applied to
		// the loaded image) to push the image to docker-receive
		cluster, err := getCluster()
		if err != nil {
			return err
		}
		host, err := cluster.DockerPushHost()
		if err != nil {
			return err
		}
		tag := fmt.Sprintf("%s/%s:latest", host, app.Name)
		if out, err := exec.Command("docker", "tag", "--force", dockerImage.config.Tag, tag).CombinedOutput(); err != nil {
			return fmt.Errorf("error tagging docker image: %s: %q", err, out)
		}

		artifact, err := dockerPush(client, app.Name, tag)
		if err != nil {
			return fmt.Errorf("error pushing docker image: %s", err)
		}

		release.ArtifactIDs = []string{artifact.ID}
	} else if imageArtifact != nil {
		if imageArtifact.ID == "" {
			if err := client.CreateArtifact(imageArtifact); err != nil {
				return fmt.Errorf("error creating image artifact: %s", err)
			}
		}
		release.ArtifactIDs = []string{imageArtifact.ID}
	}

	if release != nil {
		for t, proc := range release.Processes {
			for i, port := range proc.Ports {
				if port.Service != nil && strings.HasPrefix(port.Service.Name, oldName) {
					proc.Ports[i].Service.Name = strings.Replace(port.Service.Name, oldName, app.Name, 1)
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if uploadSlug {
		slugURI := fmt.Sprintf("http://blobstore.discoverd/%s/slug.tgz", random.UUID())
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", slugURI},
			Stdin:      slug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
		slugArtifact := &ct.Artifact{
			Type: host.ArtifactTypeFile,
			URI:  slugURI,
		}
		if err := client.CreateArtifact(slugArtifact); err != nil {
			return fmt.Errorf("error creating slug artifact: %s", err)
		}
		release.ID = ""
		release.ArtifactIDs = append(release.ArtifactIDs, slugArtifact.ID)
		if release.Meta == nil {
			release.Meta = make(map[string]string, 1)
		}
		release.Meta["git"] = "true"
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)

	return nil
}
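A minimal sketch of the shared-bar pattern used above: one bar whose Total is set to the combined upload size after pb.New(0), with each stream wrapped by NewProxyReader so sequential copies advance the same bar. It assumes gopkg.in/cheggaaa/pb.v1; the in-memory sources are illustrative.

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"os"

	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	parts := [][]byte{
		bytes.Repeat([]byte("a"), 1<<20),
		bytes.Repeat([]byte("b"), 2<<20),
	}

	var total int64
	for _, p := range parts {
		total += int64(len(p))
	}

	bar := pb.New(0)
	bar.SetUnits(pb.U_BYTES)
	bar.Total = total // set after New(0), as in the example above
	bar.ShowSpeed = true
	bar.Output = os.Stderr
	bar.Start()
	defer bar.Finish()

	for _, p := range parts {
		src := bar.NewProxyReader(bytes.NewReader(p))
		if _, err := io.Copy(ioutil.Discard, src); err != nil {
			log.Fatal(err)
		}
	}
}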
Example #7
func runImport(args *docopt.Args, client controller.Client) error {
	jobs, err := strconv.Atoi(args.String["--jobs"])
	if err != nil {
		return err
	}
	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app         *ct.App
		release     *ct.Release
		artifacts   []*ct.Artifact
		formation   *ct.Formation
		routes      []router.Route
		legacySlug  io.Reader
		dockerImage struct {
			config struct {
				Tag string `json:"tag"`
			}
			archive io.Reader
		}
		pgDump     io.Reader
		mysqlDump  io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1
	layers := make(map[string]io.Reader)

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		filename := path.Base(header.Name)
		if strings.HasSuffix(filename, ".layer") {
			f, err := ioutil.TempFile("", "flynn-layer-")
			if err != nil {
				return fmt.Errorf("error creating layer tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading %s: %s", header.Name, err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking layer tempfile: %s", err)
			}
			layers[strings.TrimSuffix(filename, ".layer")] = f
			uploadSize += header.Size
			continue
		}

		switch filename {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = nil
		case "artifacts.json":
			if err := json.NewDecoder(tr).Decode(&artifacts); err != nil {
				return fmt.Errorf("error decoding artifacts: %s", err)
			}
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			// index into the slice so the changes are not lost on loop copies
			for i := range routes {
				routes[i].ID = ""
				routes[i].ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			legacySlug = f
			uploadSize += header.Size
		case "docker-image.json":
			if err := json.NewDecoder(tr).Decode(&dockerImage.config); err != nil {
				return fmt.Errorf("error decoding docker image json: %s", err)
			}
		case "docker-image.tar":
			f, err := ioutil.TempFile("", "docker-image.tar")
			if err != nil {
				return fmt.Errorf("error creating docker image tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading docker image: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking docker image tempfile: %s", err)
			}
			dockerImage.archive = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		case "mysql.dump":
			f, err := ioutil.TempFile("", "mysql.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			mysqlDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config, jobs); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if mysqlDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "mysql",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning mysql resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getMysqlRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting mysql config: %s", err)
		}
		config.Stdin = mysqlDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := mysqlRestore(client, config); err != nil {
			return fmt.Errorf("error restoring mysql database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	var uploadLegacySlug bool

	if legacySlug != nil {
		if err := func() error {
			gitreceiveRelease, err := client.GetAppRelease("gitreceive")
			if err != nil {
				return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
			}

			// handle legacy clusters which reference Docker image URIs
			if uri, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"]; ok {
				artifact := &ct.Artifact{
					Type: ct.DeprecatedArtifactTypeDocker,
					URI:  uri,
				}
				if err := client.CreateArtifact(artifact); err != nil {
					return fmt.Errorf("error creating image artifact: %s", err)
				}
				uploadLegacySlug = true
				release.ArtifactIDs = []string{artifact.ID}
				return nil
			}

			slugBuilderID, ok := gitreceiveRelease.Env["SLUGBUILDER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugbuilder image")
			}
			slugRunnerID, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugrunner image")
			}

			// handle legacy tarball based slugbuilders (which are Docker based)
			slugBuilderImage, err := client.GetArtifact(slugBuilderID)
			if err != nil {
				return fmt.Errorf("unable to get slugbuilder image artifact: %s", err)
			}
			if slugBuilderImage.Type == ct.DeprecatedArtifactTypeDocker {
				uploadLegacySlug = true
				release.ArtifactIDs = []string{slugRunnerID}
				return nil
			}

			// Use slugbuilder to convert the legacy slug to a
			// Flynn squashfs image
			slugImageID := random.UUID()
			config := runConfig{
				App:        app.ID,
				Release:    gitreceiveRelease.ID,
				ReleaseEnv: true,
				Artifacts:  []string{slugBuilderID},
				DisableLog: true,
				Args:       []string{"/bin/convert-legacy-slug.sh"},
				Stdin:      legacySlug,
				Stdout:     ioutil.Discard,
				Stderr:     ioutil.Discard,
				Env:        map[string]string{"SLUG_IMAGE_ID": slugImageID},
			}
			if bar != nil {
				config.Stdin = bar.NewProxyReader(config.Stdin)
			}
			if err := runJob(client, config); err != nil {
				return fmt.Errorf("error uploading slug: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = []string{slugRunnerID, slugImageID}
			if release.Meta == nil {
				release.Meta = make(map[string]string, 1)
			}
			release.Meta["git"] = "true"
			return nil
		}(); err != nil {
			return err
		}
	} else if dockerImage.config.Tag != "" && dockerImage.archive != nil {
		// load the docker image into the Docker daemon
		cmd := exec.Command("docker", "load")
		cmd.Stdin = dockerImage.archive
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("error running docker load: %s: %q", err, out)
		}

		// use the tag from the config (which will now be applied to
		// the loaded image) to push the image to docker-receive
		cluster, err := getCluster()
		if err != nil {
			return err
		}
		host, err := cluster.DockerPushHost()
		if err != nil {
			return err
		}
		tag := fmt.Sprintf("%s/%s:flynn-import-%s", host, app.Name, random.String(8))
		if out, err := exec.Command("docker", "tag", dockerImage.config.Tag, tag).CombinedOutput(); err != nil {
			return fmt.Errorf("error tagging docker image: %s: %q", err, out)
		}

		artifact, err := dockerPush(client, app.Name, tag)
		if err != nil {
			return fmt.Errorf("error pushing docker image: %s", err)
		}

		release.ArtifactIDs = []string{artifact.ID}
	} else if len(artifacts) > 0 {
		// import blobstore Flynn artifacts
		blobstoreRelease, err := client.GetAppRelease("blobstore")
		if err != nil {
			return fmt.Errorf("unable to retrieve blobstore release: %s", err)
		}
		upload := func(id, url string) error {
			layer, ok := layers[id]
			if !ok {
				return fmt.Errorf("missing layer in export: %s", id)
			}
			config := runConfig{
				App:        app.ID,
				Release:    blobstoreRelease.ID,
				DisableLog: true,
				Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", url},
				Stdin:      layer,
				Stdout:     ioutil.Discard,
				Stderr:     ioutil.Discard,
			}
			if bar != nil {
				config.Stdin = bar.NewProxyReader(config.Stdin)
			}
			if err := runJob(client, config); err != nil {
				return fmt.Errorf("error uploading layer: %s", err)
			}
			return nil
		}

		release.ArtifactIDs = make([]string, len(artifacts))
		for i, artifact := range artifacts {
			if artifact.Type != ct.ArtifactTypeFlynn {
				continue
			}
			if !artifact.Blobstore() {
				continue
			}
			for _, rootfs := range artifact.Manifest().Rootfs {
				for _, layer := range rootfs.Layers {
					if err := upload(layer.ID, artifact.LayerURL(layer)); err != nil {
						return err
					}
				}
			}
			artifact.ID = ""
			if err := client.CreateArtifact(artifact); err != nil {
				return fmt.Errorf("error creating artifact: %s", err)
			}
			release.ArtifactIDs[i] = artifact.ID
		}

		// use the current slugrunner image for slug releases
		if release.IsGitDeploy() {
			gitreceiveRelease, err := client.GetAppRelease("gitreceive")
			if err != nil {
				return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
			}
			slugRunnerID, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]
			if !ok {
				return fmt.Errorf("gitreceive env missing slugrunner image")
			}
			release.ArtifactIDs[0] = slugRunnerID
		}
	}

	if release != nil {
		for t, proc := range release.Processes {
			// update legacy slug releases to use Args rather than the
			// deprecated Entrypoint and Cmd fields
			if release.IsGitDeploy() && len(proc.Args) == 0 {
				proc.Args = append([]string{"/runner/init"}, proc.DeprecatedCmd...)
				proc.DeprecatedCmd = nil
			}
			for i, port := range proc.Ports {
				if port.Service != nil && strings.HasPrefix(port.Service.Name, oldName) {
					proc.Ports[i].Service.Name = strings.Replace(port.Service.Name, oldName, app.Name, 1)
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if uploadLegacySlug {
		slugURI := fmt.Sprintf("http://blobstore.discoverd/%s/slug.tgz", random.UUID())
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", slugURI},
			Stdin:      legacySlug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
		slugArtifact := &ct.Artifact{
			Type: ct.DeprecatedArtifactTypeFile,
			URI:  slugURI,
		}
		if err := client.CreateArtifact(slugArtifact); err != nil {
			return fmt.Errorf("error creating slug artifact: %s", err)
		}
		release.ID = ""
		release.ArtifactIDs = append(release.ArtifactIDs, slugArtifact.ID)
		if release.Meta == nil {
			release.Meta = make(map[string]string, 1)
		}
		release.Meta["git"] = "true"
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)

	return nil
}