Example #1
func main() {
	serviceName := os.Getenv("FLYNN_POSTGRES")
	if serviceName == "" {
		serviceName = "postgres"
	}
	singleton := os.Getenv("SINGLETON") == "true"
	password := os.Getenv("PGPASSWORD")

	const dataDir = "/data"
	idFile := filepath.Join(dataDir, "instance_id")
	idBytes, err := ioutil.ReadFile(idFile)
	if err != nil && !os.IsNotExist(err) {
		shutdown.Fatalf("error reading instance ID: %s", err)
	}
	id := string(idBytes)
	if len(id) == 0 {
		id = random.UUID()
		if err := ioutil.WriteFile(idFile, []byte(id), 0644); err != nil {
			shutdown.Fatalf("error writing instance ID: %s", err)
		}
	}

	err = discoverd.DefaultClient.AddService(serviceName, &discoverd.ServiceConfig{
		LeaderType: discoverd.LeaderTypeManual,
	})
	if err != nil && !httphelper.IsObjectExistsError(err) {
		shutdown.Fatal(err)
	}
	inst := &discoverd.Instance{
		Addr: ":5432",
		Meta: map[string]string{pgIdKey: id},
	}
	hb, err := discoverd.DefaultClient.RegisterInstance(serviceName, inst)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	log := log15.New("app", "postgres")

	pg := NewPostgres(Config{
		ID:           id,
		Singleton:    singleton,
		DataDir:      filepath.Join(dataDir, "db"),
		BinDir:       "/usr/lib/postgresql/9.5/bin/",
		Password:     password,
		Logger:       log.New("component", "postgres"),
		ExtWhitelist: true,
		WaitUpstream: true,
		SHMType:      "posix",
	})
	dd := sd.NewDiscoverd(discoverd.DefaultClient.Service(serviceName), log.New("component", "discoverd"))

	peer := state.NewPeer(inst, id, pgIdKey, singleton, dd, pg, log.New("component", "peer"))
	shutdown.BeforeExit(func() { peer.Close() })

	go peer.Run()
	shutdown.Fatal(ServeHTTP(pg.(*Postgres), peer, hb, log.New("component", "http")))
	// TODO(titanous): clean shutdown of postgres
}
Example #2
func NewInstaller(l log.Logger) *Installer {
	installer := &Installer{
		subscriptions: make([]*Subscription, 0),
		clusters:      make([]Cluster, 0),
		logger:        l,
	}
	if err := installer.openDB(); err != nil {
		if err.Error() == "resource temporarily unavailable" {
			shutdown.Fatal("Error: Another `flynn install` process is already running.")
		}
		shutdown.Fatalf("Error opening database: %s", err)
	}
	if err := installer.loadEventsFromDB(); err != nil {
		shutdown.Fatalf("Error loading events from database: %s", err)
	}
	return installer
}
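
The string comparison against "resource temporarily unavailable" works because that is the Go error text for EAGAIN/EWOULDBLOCK; presumably the installer's database file is guarded by a non-blocking exclusive flock, so a second `flynn install` fails to acquire it with exactly that errno. A minimal sketch of the pattern under that assumption (the lock path and helper are illustrative, not the installer's actual openDB):

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// tryLock takes a non-blocking exclusive lock on path. If another
// process already holds it, flock(2) fails with EWOULDBLOCK, whose
// error string is "resource temporarily unavailable" -- the message
// matched in NewInstaller above.
func tryLock(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return nil, err
	}
	if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		f.Close()
		return nil, err
	}
	return f, nil
}

func main() {
	f, err := tryLock("/tmp/installer.lock") // illustrative path
	if errors.Is(err, syscall.EWOULDBLOCK) {
		fmt.Println("another install process is already running")
		return
	} else if err != nil {
		fmt.Println("error opening lock file:", err)
		return
	}
	defer f.Close()
	fmt.Println("lock acquired")
}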
Example #3
func main() {
	defer shutdown.Exit()

	addr := ":" + os.Getenv("PORT")

	db := postgres.Wait(nil, nil)
	if err := dbMigrations.Migrate(db); err != nil {
		shutdown.Fatalf("error running DB migrations: %s", err)
	}

	mux := http.NewServeMux()

	repo, err := data.NewFileRepoFromEnv(db)
	if err != nil {
		shutdown.Fatal(err)
	}

	hb, err := discoverd.AddServiceAndRegister("blobstore", addr)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	log.Println("Blobstore serving files on " + addr)

	mux.Handle("/", handler(repo))
	mux.Handle(status.Path, status.Handler(func() status.Status {
		if err := db.Exec("SELECT 1"); err != nil {
			return status.Unhealthy
		}
		return status.Healthy
	}))

	h := httphelper.ContextInjector("blobstore", httphelper.NewRequestLogger(mux))
	shutdown.Fatal(http.ListenAndServe(addr, h))
}
Example #4
// Update performs a zero-downtime update of the flynn-host daemon, replacing
// the current daemon with an instance of the given command.
//
// The HTTP API listener is passed from the parent to the child, but due to the
// state DBs being process exclusive and requiring initialisation, further
// synchronisation is required to manage opening and closing them, which is done
// using a control socket.
//
// Any partial log lines read by the parent are also passed to the child to
// avoid dropping logs or sending partial logs over two lines.
//
// An outline of the process:
//
// * parent receives a request to exec a new daemon
//
// * parent creates a control socket pair (via socketpair(2))
//
// * parent starts a child process, passing the API listener as FD 3, and a
//   control socket as FD 4
//
// * parent closes its API listener FD, state DBs and log followers.
//
// * parent signals the child to resume by sending "resume" message to control
//   socket, followed by any partial log buffers.
//
// * child receives resume request, opens state DBs, seeds the log followers
//   with the partial buffers and starts serving API requests
//
// * child signals parent it is now serving requests by sending "ok" message to
//   control socket
//
// * parent sends response to client and shuts down seconds later
//
func (h *Host) Update(cmd *host.Command) error {
	log := h.log.New("fn", "Update")

	// dup the listener so we can close the current listener but still be
	// able to continue serving requests if the child exits by using the dup'd
	// listener.
	log.Info("duplicating HTTP listener")
	file, err := h.listener.(*net.TCPListener).File()
	if err != nil {
		log.Error("error duplicating HTTP listener", "err", err)
		return err
	}
	defer file.Close()

	// exec a child, passing the listener and control socket as extra files
	log.Info("creating child process")
	child, err := h.exec(cmd, file)
	if err != nil {
		log.Error("error creating child process", "err", err)
		return err
	}
	defer child.CloseSock()

	// close our listener and state DBs
	log.Info("closing HTTP listener")
	h.listener.Close()
	log.Info("closing state databases")
	if err := h.CloseDBs(); err != nil {
		log.Error("error closing state databases", "err", err)
		return err
	}

	log.Info("closing logs")
	buffers, err := h.CloseLogs()
	if err != nil {
		log.Error("error closing logs", "err", err)
		return err
	}

	log.Info("resuming child process")
	if resumeErr := child.Resume(buffers); resumeErr != nil {
		log.Error("error resuming child process", "err", resumeErr)

		// The child failed to resume, kill it and resume ourselves.
		//
		// If anything fails here, exit rather than returning an error
		// so a new host process can be started (rather than this
		// process sitting around not serving requests).
		log.Info("killing child process")
		child.Kill()

		log.Info("reopening logs")
		if err := h.OpenLogs(buffers); err != nil {
			shutdown.Fatalf("error reopening logs after failed update: %s", err)
		}

		log.Error("recreating HTTP listener")
		l, err := net.FileListener(file)
		if err != nil {
			shutdown.Fatalf("error recreating HTTP listener after failed update: %s", err)
		}
		h.listener = l

		log.Info("reopening state databases")
		if err := h.OpenDBs(); err != nil {
			shutdown.Fatalf("error reopening state databases after failed update: %s", err)
		}

		log.Info("serving HTTP requests")
		h.ServeHTTP()

		return resumeErr
	}

	return nil
}
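
The doc comment above leans on the ability to hand a live TCP listener to a child process as a plain file descriptor: the parent dups it with (*net.TCPListener).File(), the child inherits it via exec.Cmd.ExtraFiles (the first entry becomes FD 3), and rebuilds a listener with net.FileListener. A minimal sketch of that hand-off using only the standard library (this is not Flynn's h.exec/child.Resume plumbing, just the underlying mechanism); the control socket would be passed the same way as ExtraFiles[1], i.e. FD 4 in the child:

package main

import (
	"net"
	"os"
	"os/exec"
)

// startChild dups the listening socket and starts bin with the dup as
// FD 3 (ExtraFiles[0] maps to FD 3 in the child process).
func startChild(l *net.TCPListener, bin string) (*exec.Cmd, error) {
	f, err := l.File() // dup(2) of the listener's descriptor
	if err != nil {
		return nil, err
	}
	defer f.Close()

	cmd := exec.Command(bin)
	cmd.ExtraFiles = []*os.File{f}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd, cmd.Start()
}

// inheritListener is the child-side half: it rebuilds a net.Listener
// from the inherited descriptor so it can keep serving the same socket.
func inheritListener() (net.Listener, error) {
	return net.FileListener(os.NewFile(3, "listener"))
}

func main() {
	// Sketch only: a real parent would call startChild with its live
	// listener, and the child binary would call inheritListener.
	_ = startChild
	_ = inheritListener
}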
Example #5
func runExport(args *docopt.Args, client controller.Client) error {
	var dest io.Writer = os.Stdout
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Create(filename)
		if err != nil {
			return fmt.Errorf("error creating export file: %s", err)
		}
		defer f.Close()
		dest = f
	}

	app, err := client.GetApp(mustApp())
	if err != nil {
		return fmt.Errorf("error getting app: %s", err)
	}

	var bar backup.ProgressBar
	if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) {
		b := pb.New(0)
		b.SetUnits(pb.U_BYTES)
		b.ShowBar = false
		b.ShowSpeed = true
		b.Output = os.Stderr
		b.Start()
		defer b.Finish()
		bar = b
	}

	tw := backup.NewTarWriter(app.Name, dest, bar)
	defer tw.Close()

	if err := tw.WriteJSON("app.json", app); err != nil {
		return fmt.Errorf("error exporting app: %s", err)
	}

	routes, err := client.RouteList(mustApp())
	if err != nil {
		return fmt.Errorf("error getting routes: %s", err)
	}
	if err := tw.WriteJSON("routes.json", routes); err != nil {
		return fmt.Errorf("error exporting routes: %s", err)
	}

	release, err := client.GetAppRelease(mustApp())
	if err == controller.ErrNotFound {
		// if the app has no release then there is nothing more to export
		return nil
	} else if err != nil {
		return fmt.Errorf("error retrieving app release: %s", err)
	}

	// Do not allow the exporting of passwords.
	delete(release.Env, "REDIS_PASSWORD")

	if err := tw.WriteJSON("release.json", release); err != nil {
		return fmt.Errorf("error exporting release: %s", err)
	}

	var artifact *ct.Artifact
	if artifactID := release.ImageArtifactID(); artifactID != "" {
		artifact, err = client.GetArtifact(artifactID)
		if err != nil && err != controller.ErrNotFound {
			return fmt.Errorf("error retrieving artifact: %s", err)
		} else if err == nil {
			if err := tw.WriteJSON("artifact.json", artifact); err != nil {
				return fmt.Errorf("error exporting artifact: %s", err)
			}
		}
	}

	formation, err := client.GetFormation(mustApp(), release.ID)
	if err != nil && err != controller.ErrNotFound {
		return fmt.Errorf("error retrieving formation: %s", err)
	} else if err == nil {
		if err := tw.WriteJSON("formation.json", formation); err != nil {
			return fmt.Errorf("error exporting formation: %s", err)
		}
	}

	// if the release was deployed via docker-receive, pull the docker
	// image and add it to the export using "docker save"
	if release.IsDockerReceiveDeploy() && artifact != nil {
		cluster, err := getCluster()
		if err != nil {
			return err
		}
		host, err := cluster.DockerPushHost()
		if err != nil {
			return err
		}

		// the artifact will have an internal discoverd URL which will
		// not work if the Docker daemon is outside the cluster, so
		// generate a reference using the configured DockerPushURL
		repo := artifact.Meta["docker-receive.repository"]
		digest := artifact.Meta["docker-receive.digest"]
		ref := fmt.Sprintf("%s/%s@%s", host, repo, digest)

		// pull the Docker image
		cmd := exec.Command("docker", "pull", ref)
		log.Printf("flynn: pulling Docker image with %q", strings.Join(cmd.Args, " "))
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			return err
		}

		// give the image an explicit, random tag so that "docker save"
		// will export an image that we can reference on import (just
		// using the digest is not enough as "docker inspect" only
		// works with tags)
		tag := fmt.Sprintf("%s:flynn-export-%s", repo, random.String(8))
		if out, err := exec.Command("docker", "tag", "--force", ref, tag).CombinedOutput(); err != nil {
			return fmt.Errorf("error tagging docker image: %s: %q", err, out)
		}
		defer exec.Command("docker", "rmi", tag).Run()

		if err := dockerSave(tag, tw, bar); err != nil {
			return fmt.Errorf("error exporting docker image: %s", err)
		}

		// add the tag to the backup so we know how to reference the
		// image once it has been imported
		config := struct {
			Tag string `json:"tag"`
		}{tag}
		if err := tw.WriteJSON("docker-image.json", &config); err != nil {
			return fmt.Errorf("error exporting docker image: %s", err)
		}
	}

	// expect releases deployed via git to have a slug as their first file
	// artifact, and legacy releases to have SLUG_URL set
	var slugURL string
	if release.IsGitDeploy() && len(release.FileArtifactIDs()) > 0 {
		slugArtifact, err := client.GetArtifact(release.FileArtifactIDs()[0])
		if err != nil && err != controller.ErrNotFound {
			return fmt.Errorf("error retrieving slug artifact: %s", err)
		} else if err == nil {
			slugURL = slugArtifact.URI
		}
	} else if u, ok := release.Env["SLUG_URL"]; ok {
		slugURL = u
	}
	if slugURL != "" {
		reqR, reqW := io.Pipe()
		config := runConfig{
			App:        mustApp(),
			Release:    release.ID,
			DisableLog: true,
			Args:       []string{"curl", "--include", "--location", "--raw", slugURL},
			Stdout:     reqW,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdout = io.MultiWriter(config.Stdout, bar)
		}
		go func() {
			if err := runJob(client, config); err != nil {
				shutdown.Fatalf("error retrieving slug: %s", err)
			}
		}()
		req := bufio.NewReader(reqR)
		var res *http.Response
		maxRedirects := 5
		for i := 0; i < maxRedirects; i++ {
			res, err = http.ReadResponse(req, nil)
			if err != nil {
				return fmt.Errorf("error reading slug response: %s", err)
			}
			if res.StatusCode != http.StatusFound {
				break
			}
		}
		if res.StatusCode != http.StatusOK {
			return fmt.Errorf("unexpected status getting slug: %d", res.StatusCode)
		}
		length, err := strconv.Atoi(res.Header.Get("Content-Length"))
		if err != nil {
			return fmt.Errorf("slug has missing or malformed Content-Length")
		}

		if err := tw.WriteHeader("slug.tar.gz", length); err != nil {
			return fmt.Errorf("error writing slug header: %s", err)
		}
		if _, err := io.Copy(tw, res.Body); err != nil {
			return fmt.Errorf("error writing slug: %s", err)
		}
		res.Body.Close()
	}

	if pgConfig, err := getAppPgRunConfig(client); err == nil {
		configPgDump(pgConfig)
		if err := tw.WriteCommandOutput(client, "postgres.dump", pgConfig.App, &ct.NewJob{
			ReleaseID:  pgConfig.Release,
			Args:       pgConfig.Args,
			Env:        pgConfig.Env,
			DisableLog: pgConfig.DisableLog,
		}); err != nil {
			return fmt.Errorf("error creating postgres dump: %s", err)
		}
	}

	if mysqlConfig, err := getAppMysqlRunConfig(client); err == nil {
		configMysqlDump(mysqlConfig)
		if err := tw.WriteCommandOutput(client, "mysql.dump", mysqlConfig.App, &ct.NewJob{
			ReleaseID:  mysqlConfig.Release,
			Args:       mysqlConfig.Args,
			Env:        mysqlConfig.Env,
			DisableLog: mysqlConfig.DisableLog,
		}); err != nil {
			return fmt.Errorf("error creating mysql dump: %s", err)
		}
	}

	return nil
}
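
The slug export above streams the output of `curl --include --location` from a job through an io.Pipe and parses it on the other end with http.ReadResponse; with --location each redirect hop yields another complete response on the stream, which is why the loop keeps reading until it sees a non-302 status. A self-contained sketch of just the parsing step, with a canned response standing in for the job's stdout:

package main

import (
	"bufio"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// A raw HTTP/1.1 response, as `curl --include` would write it.
	raw := "HTTP/1.1 200 OK\r\n" +
		"Content-Length: 11\r\n" +
		"Content-Type: application/octet-stream\r\n" +
		"\r\n" +
		"hello slug!"

	// Parse status line, headers and body from the byte stream.
	res, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(res.StatusCode, res.Header.Get("Content-Length"), string(body))
	// prints: 200 11 hello slug!
}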
Example #6
func runExport(args *docopt.Args, client *controller.Client) error {
	var dest io.Writer = os.Stdout
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Create(filename)
		if err != nil {
			return fmt.Errorf("error creating export file: %s", err)
		}
		defer f.Close()
		dest = f
	}

	app, err := client.GetApp(mustApp())
	if err != nil {
		return fmt.Errorf("error getting app: %s", err)
	}

	tw := backup.NewTarWriter(app.Name, dest)
	defer tw.Close()

	if err := tw.WriteJSON("app.json", app); err != nil {
		return fmt.Errorf("error exporting app: %s", err)
	}

	routes, err := client.RouteList(mustApp())
	if err != nil {
		return fmt.Errorf("error getting routes: %s", err)
	}
	if err := tw.WriteJSON("routes.json", routes); err != nil {
		return fmt.Errorf("error exporting routes: %s", err)
	}

	release, err := client.GetAppRelease(mustApp())
	if err == controller.ErrNotFound {
		// if the app has no release then there is nothing more to export
		return nil
	} else if err != nil {
		return fmt.Errorf("error retrieving app release: %s", err)
	}

	// Do not allow the exporting of passwords.
	delete(release.Env, "REDIS_PASSWORD")

	if err := tw.WriteJSON("release.json", release); err != nil {
		return fmt.Errorf("error exporting release: %s", err)
	}

	artifact, err := client.GetArtifact(release.ArtifactID)
	if err != nil && err != controller.ErrNotFound {
		return fmt.Errorf("error retrieving artifact: %s", err)
	} else if err == nil {
		if err := tw.WriteJSON("artifact.json", artifact); err != nil {
			return fmt.Errorf("error exporting artifact: %s", err)
		}
	}

	formation, err := client.GetFormation(mustApp(), release.ID)
	if err != nil && err != controller.ErrNotFound {
		return fmt.Errorf("error retrieving formation: %s", err)
	} else if err == nil {
		if err := tw.WriteJSON("formation.json", formation); err != nil {
			return fmt.Errorf("error exporting formation: %s", err)
		}
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.ShowBar = false
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if slug, ok := release.Env["SLUG_URL"]; ok {
		reqR, reqW := io.Pipe()
		config := runConfig{
			App:        mustApp(),
			Release:    release.ID,
			DisableLog: true,
			Entrypoint: []string{"curl"},
			Args:       []string{"--include", "--raw", slug},
			Stdout:     reqW,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdout = io.MultiWriter(config.Stdout, bar)
		}
		go func() {
			if err := runJob(client, config); err != nil {
				shutdown.Fatalf("error retrieving slug: %s", err)
			}
		}()
		res, err := http.ReadResponse(bufio.NewReader(reqR), nil)
		if err != nil {
			return fmt.Errorf("error reading slug response: %s", err)
		}
		if res.StatusCode != 200 {
			return fmt.Errorf("unexpected status getting slug: %d", res.StatusCode)
		}
		length, err := strconv.Atoi(res.Header.Get("Content-Length"))
		if err != nil {
			return fmt.Errorf("slug has missing or malformed Content-Length")
		}

		if err := tw.WriteHeader("slug.tar.gz", length); err != nil {
			return fmt.Errorf("error writing slug header: %s", err)
		}
		if _, err := io.Copy(tw, res.Body); err != nil {
			return fmt.Errorf("error writing slug: %s", err)
		}
		res.Body.Close()
	}

	if config, err := getAppPgRunConfig(client); err == nil {
		configPgDump(config)
		if err := tw.WriteCommandOutput(client, "postgres.dump", config.App, &ct.NewJob{
			ReleaseID:  config.Release,
			Entrypoint: config.Entrypoint,
			Cmd:        config.Args,
			Env:        config.Env,
			DisableLog: config.DisableLog,
		}); err != nil {
			return fmt.Errorf("error creating postgres dump: %s", err)
		}
	}

	return nil
}
Example #7
func main() {
	defer shutdown.Exit()

	var cookieKey *[32]byte
	if key := os.Getenv("COOKIE_KEY"); key != "" {
		res, err := base64.StdEncoding.DecodeString(key)
		if err != nil {
			shutdown.Fatalf("error decoding COOKIE_KEY: %s", err)
		}
		if len(res) != 32 {
			shutdown.Fatalf("decoded %d bytes from COOKIE_KEY, expected 32", len(res))
		}
		var k [32]byte
		copy(k[:], res)
		cookieKey = &k
	}
	if cookieKey == nil {
		shutdown.Fatal("Missing random 32 byte base64-encoded COOKIE_KEY")
	}

	httpPort := flag.String("http-port", "8080", "http listen port")
	httpsPort := flag.String("https-port", "4433", "https listen port")
	tcpIP := flag.String("tcp-ip", os.Getenv("LISTEN_IP"), "tcp router listen ip")
	tcpRangeStart := flag.Int("tcp-range-start", 3000, "tcp port range start")
	tcpRangeEnd := flag.Int("tcp-range-end", 3500, "tcp port range end")
	certFile := flag.String("tls-cert", "", "TLS (SSL) cert file in pem format")
	keyFile := flag.String("tls-key", "", "TLS (SSL) key file in pem format")
	apiPort := flag.String("api-port", "", "api listen port")
	flag.Parse()

	if *apiPort == "" {
		*apiPort = os.Getenv("PORT")
		if *apiPort == "" {
			*apiPort = "5000"
		}
	}

	keypair := tls.Certificate{}
	var err error
	if *certFile != "" {
		if keypair, err = tls.LoadX509KeyPair(*certFile, *keyFile); err != nil {
			shutdown.Fatal(err)
		}
	} else if tlsCert := os.Getenv("TLSCERT"); tlsCert != "" {
		if tlsKey := os.Getenv("TLSKEY"); tlsKey != "" {
			os.Setenv("TLSKEY", fmt.Sprintf("md5^(%s)", md5sum(tlsKey)))
			if keypair, err = tls.X509KeyPair([]byte(tlsCert), []byte(tlsKey)); err != nil {
				shutdown.Fatal(err)
			}
		}
	}

	log := logger.New("fn", "main")

	log.Info("connecting to postgres")
	db, err := postgres.Open("", "")
	if err != nil {
		log.Error("error connecting to postgres", "err", err)
		shutdown.Fatal(err)
	}
	log.Info("running DB migrations")
	if err := migrateDB(db.DB); err != nil {
		log.Error("error running DB migrations", "err", err)
		shutdown.Fatal(err)
	}

	var pgport int
	if port := os.Getenv("PGPORT"); port != "" {
		var err error
		if pgport, err = strconv.Atoi(port); err != nil {
			shutdown.Fatal(err)
		}
	}

	log.Info("creating postgres connection pool")
	pgxpool, err := pgx.NewConnPool(pgx.ConnPoolConfig{
		ConnConfig: pgx.ConnConfig{
			Host:     os.Getenv("PGHOST"),
			Port:     uint16(pgport),
			Database: os.Getenv("PGDATABASE"),
			User:     os.Getenv("PGUSER"),
			Password: os.Getenv("PGPASSWORD"),
		},
	})
	if err != nil {
		log.Error("error creating postgres connection pool", "err", err)
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { pgxpool.Close() })

	httpAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *httpPort)
	httpsAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *httpsPort)
	r := Router{
		TCP: &TCPListener{
			IP:        *tcpIP,
			startPort: *tcpRangeStart,
			endPort:   *tcpRangeEnd,
			ds:        NewPostgresDataStore("tcp", pgxpool),
			discoverd: discoverd.DefaultClient,
		},
		HTTP: &HTTPListener{
			Addr:      httpAddr,
			TLSAddr:   httpsAddr,
			cookieKey: cookieKey,
			keypair:   keypair,
			ds:        NewPostgresDataStore("http", pgxpool),
			discoverd: discoverd.DefaultClient,
		},
	}

	if err := r.Start(); err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(r.Close)

	apiAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *apiPort)
	log.Info("starting API listener")
	listener, err := listenFunc("tcp4", apiAddr)
	if err != nil {
		log.Error("error starting API listener", "err", err)
		shutdown.Fatal(listenErr{apiAddr, err})
	}

	services := map[string]string{
		"router-api":  apiAddr,
		"router-http": httpAddr,
	}
	for service, addr := range services {
		log.Info("registering service", "name", service, "addr", addr)
		hb, err := discoverd.AddServiceAndRegister(service, addr)
		if err != nil {
			log.Error("error registering service", "name", service, "addr", addr, "err", err)
			shutdown.Fatal(err)
		}
		shutdown.BeforeExit(func() { hb.Close() })
	}

	log.Info("serving API requests")
	shutdown.Fatal(http.Serve(listener, apiHandler(&r)))
}
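
The router requires COOKIE_KEY to decode to exactly 32 random bytes. A short sketch of producing a value in the format main() expects (standard-library calls only; how the variable is actually provisioned in a Flynn cluster is outside this example):

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	// 32 random bytes, base64-encoded the way the router's main()
	// above decodes COOKIE_KEY.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(key))
}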
Example #8
func runExport(args *docopt.Args, client *controller.Client) error {
	var dest io.Writer = os.Stdout
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Create(filename)
		if err != nil {
			return fmt.Errorf("error creating export file: %s", err)
		}
		defer f.Close()
		dest = f
	}
	tw := tar.NewWriter(dest)
	defer tw.Close()

	uid := syscall.Getuid()
	header := func(name string, length int) error {
		return tw.WriteHeader(&tar.Header{
			Name:     path.Join(mustApp(), name),
			Mode:     0644,
			Size:     int64(length),
			ModTime:  time.Now(),
			Typeflag: tar.TypeReg,
			Uid:      uid,
			Gid:      uid,
		})
	}
	writeJSON := func(name string, v interface{}) error {
		data, err := json.MarshalIndent(v, "", "  ")
		if err != nil {
			return err
		}
		if err := header(name, len(data)+1); err != nil {
			return err
		}
		if _, err := tw.Write(data); err != nil {
			return err
		}
		if _, err := tw.Write([]byte("\n")); err != nil {
			return err
		}
		return nil
	}

	app, err := client.GetApp(mustApp())
	if err != nil {
		return fmt.Errorf("error getting app: %s", err)
	}
	if err := writeJSON("app.json", app); err != nil {
		return fmt.Errorf("error exporting app: %s", err)
	}

	release, err := client.GetAppRelease(mustApp())
	if err == controller.ErrNotFound {
		// if the app has no release then there is nothing more to export
		return nil
	} else if err != nil {
		return fmt.Errorf("error retrieving app release: %s", err)
	}
	if err := writeJSON("release.json", release); err != nil {
		return fmt.Errorf("error exporting release: %s", err)
	}

	artifact, err := client.GetArtifact(release.ArtifactID)
	if err != nil && err != controller.ErrNotFound {
		return fmt.Errorf("error retrieving artifact: %s", err)
	} else if err == nil {
		if err := writeJSON("artifact.json", artifact); err != nil {
			return fmt.Errorf("error exporting artifact: %s", err)
		}
	}

	formation, err := client.GetFormation(mustApp(), release.ID)
	if err != nil && err != controller.ErrNotFound {
		return fmt.Errorf("error retrieving formation: %s", err)
	} else if err == nil {
		if err := writeJSON("formation.json", formation); err != nil {
			return fmt.Errorf("error exporting formation: %s", err)
		}
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.ShowBar = false
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if slug, ok := release.Env["SLUG_URL"]; ok {
		reqR, reqW := io.Pipe()
		config := runConfig{
			App:        mustApp(),
			Release:    release.ID,
			DisableLog: true,
			Entrypoint: []string{"curl"},
			Args:       []string{"--include", "--raw", slug},
			Stdout:     reqW,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdout = io.MultiWriter(config.Stdout, bar)
		}
		go func() {
			if err := runJob(client, config); err != nil {
				shutdown.Fatalf("error retrieving slug: %s", err)
			}
		}()
		res, err := http.ReadResponse(bufio.NewReader(reqR), nil)
		if err != nil {
			return fmt.Errorf("error reading slug response: %s", err)
		}
		if res.StatusCode != 200 {
			return fmt.Errorf("unexpected status getting slug: %d", res.StatusCode)
		}
		length, err := strconv.Atoi(res.Header.Get("Content-Length"))
		if err != nil {
			return fmt.Errorf("slug has missing or malformed Content-Length")
		}

		if err := header("slug.tar.gz", length); err != nil {
			return fmt.Errorf("error writing slug header: %s", err)
		}
		if _, err := io.Copy(tw, res.Body); err != nil {
			return fmt.Errorf("error writing slug: %s", err)
		}
		res.Body.Close()
	}

	if config, err := getAppPgRunConfig(client); err == nil {
		f, err := ioutil.TempFile("", "postgres.dump")
		if err != nil {
			return fmt.Errorf("error creating db temp file: %s", err)
		}
		defer f.Close()
		defer os.Remove(f.Name())
		config.Stdout = f
		config.Exit = false

		if bar != nil {
			config.Stdout = io.MultiWriter(config.Stdout, bar)
		}
		if err := pgDump(client, config); err != nil {
			return fmt.Errorf("error dumping database: %s", err)
		}

		length, err := f.Seek(0, os.SEEK_CUR)
		if err != nil {
			return fmt.Errorf("error getting db size: %s", err)
		}
		if err := header("postgres.dump", int(length)); err != nil {
			return fmt.Errorf("error writing db header: %s", err)
		}
		if _, err := f.Seek(0, os.SEEK_SET); err != nil {
			return fmt.Errorf("error seeking db dump: %s", err)
		}
		if _, err := io.Copy(tw, f); err != nil {
			return fmt.Errorf("error exporting db: %s", err)
		}
	}

	return nil
}
Example #9
func main() {
	// when starting a container with libcontainer, we first exec the
	// current binary with libcontainer-init as the first argument,
	// which triggers the following code to initialise the container
	// environment (namespaces, network etc.) then exec containerinit
	if len(os.Args) > 1 && os.Args[1] == "libcontainer-init" {
		runtime.GOMAXPROCS(1)
		runtime.LockOSThread()
		factory, _ := libcontainer.New("")
		if err := factory.StartInitialization(); err != nil {
			log.Fatal(err)
		}
	}

	defer shutdown.Exit()

	usage := `usage: flynn-host [-h|--help] [--version] <command> [<args>...]

Options:
  -h, --help                 Show this message
  --version                  Show current version

Commands:
  help                       Show usage for a specific command
  init                       Create cluster configuration for daemon
  daemon                     Start the daemon
  update                     Update Flynn components
  download                   Download container images
  bootstrap                  Bootstrap layer 1
  inspect                    Get low-level information about a job
  log                        Get the logs of a job
  ps                         List jobs
  stop                       Stop running jobs
  signal                     Signal a job
  destroy-volumes            Destroys the local volume database
  collect-debug-info         Collect debug information into an anonymous gist or tarball
  list                       Lists ID and IP of each host
  version                    Show current version
  fix                        Fix a broken cluster
  tags                       Manage flynn-host daemon tags
  discover                   Return low-level information about a service

See 'flynn-host help <command>' for more information on a specific command.
`

	args, _ := docopt.Parse(usage, nil, true, version.String(), true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn help`
			fmt.Println(usage)
			return
		} else { // `flynn help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if cmd == "daemon" {
		// merge in args and env from config file, if available
		var c *config.Config
		if n := os.Getenv("FLYNN_HOST_CONFIG"); n != "" {
			var err error
			c, err = config.Open(n)
			if err != nil {
				shutdown.Fatalf("error opening config file %s: %s", n, err)
			}
		} else {
			var err error
			c, err = config.Open(configFile)
			if err != nil && !os.IsNotExist(err) {
				shutdown.Fatalf("error opening config file %s: %s", configFile, err)
			}
			if c == nil {
				c = &config.Config{}
			}
		}
		cmdArgs = append(cmdArgs, c.Args...)
		for k, v := range c.Env {
			os.Setenv(k, v)
		}
	}

	if err := cli.Run(cmd, cmdArgs); err != nil {
		if err == cli.ErrInvalidCommand {
			fmt.Printf("ERROR: %q is not a valid command\n\n", cmd)
			fmt.Println(usage)
			shutdown.ExitWithCode(1)
		} else if _, ok := err.(cli.ErrAlreadyLogged); ok {
			shutdown.ExitWithCode(1)
		}
		shutdown.Fatal(err)
	}
}
Example #10
func runDaemon(args *docopt.Args) {
	hostname, _ := os.Hostname()
	httpPort := args.String["--http-port"]
	externalIP := args.String["--external-ip"]
	listenIP := args.String["--listen-ip"]
	stateFile := args.String["--state"]
	hostID := args.String["--id"]
	tags := parseTagArgs(args.String["--tags"])
	force := args.Bool["--force"]
	volPath := args.String["--volpath"]
	volProvider := args.String["--vol-provider"]
	backendName := args.String["--backend"]
	flynnInit := args.String["--flynn-init"]
	logDir := args.String["--log-dir"]
	discoveryToken := args.String["--discovery"]
	bridgeName := args.String["--bridge-name"]

	logger, err := setupLogger(logDir)
	if err != nil {
		shutdown.Fatalf("error setting up logger: %s", err)
	}

	var peerIPs []string
	if args.String["--peer-ips"] != "" {
		peerIPs = strings.Split(args.String["--peer-ips"], ",")
	}

	if hostID == "" {
		hostID = strings.Replace(hostname, "-", "", -1)
	}

	var maxJobConcurrency uint64 = 4
	if m, err := strconv.ParseUint(args.String["--max-job-concurrency"], 10, 64); err == nil {
		maxJobConcurrency = m
	}

	var partitionCGroups = make(map[string]int64) // name -> cpu shares
	for _, p := range strings.Split(args.String["--partitions"], " ") {
		nameShares := strings.Split(p, "=cpu_shares:")
		if len(nameShares) != 2 {
			shutdown.Fatalf("invalid partition specifier: %q", p)
		}
		shares, err := strconv.ParseInt(nameShares[1], 10, 64)
		if err != nil || shares < 2 {
			shutdown.Fatalf("invalid cpu shares specifier: %q", shares)
		}
		partitionCGroups[nameShares[0]] = shares
	}
	for _, s := range []string{"user", "system", "background"} {
		if _, ok := partitionCGroups[s]; !ok {
			shutdown.Fatalf("missing mandatory resource partition: %s", s)
		}
	}

	log := logger.New("fn", "runDaemon", "host.id", hostID)
	log.Info("starting daemon")

	log.Info("validating host ID")
	if strings.Contains(hostID, "-") {
		shutdown.Fatal("host id must not contain dashes")
	}
	if externalIP == "" {
		log.Info("detecting external IP")
		var err error
		externalIP, err = config.DefaultExternalIP()
		if err != nil {
			log.Error("error detecting external IP", "err", err)
			shutdown.Fatal(err)
		}
		log.Info("using external IP " + externalIP)
	}

	publishAddr := net.JoinHostPort(externalIP, httpPort)
	if discoveryToken != "" {
		// TODO: retry
		log.Info("registering with cluster discovery service", "token", discoveryToken, "addr", publishAddr, "name", hostID)
		discoveryID, err := discovery.RegisterInstance(discovery.Info{
			ClusterURL:  discoveryToken,
			InstanceURL: "http://" + publishAddr,
			Name:        hostID,
		})
		if err != nil {
			log.Error("error registering with cluster discovery service", "err", err)
			shutdown.Fatal(err)
		}
		log.Info("registered with cluster discovery service", "id", discoveryID)
	}

	state := NewState(hostID, stateFile)
	shutdown.BeforeExit(func() { state.CloseDB() })

	log.Info("initializing volume manager", "provider", volProvider)
	var newVolProvider func() (volume.Provider, error)
	switch volProvider {
	case "zfs":
		newVolProvider = func() (volume.Provider, error) {
			// use a zpool backing file size of either 70% of the device on which
			// volumes will reside, or 100GB if that can't be determined.
			log.Info("determining ZFS zpool size")
			var size int64
			var dev syscall.Statfs_t
			if err := syscall.Statfs(volPath, &dev); err == nil {
				size = (dev.Bsize * int64(dev.Blocks) * 7) / 10
			} else {
				size = 100000000000
			}
			log.Info(fmt.Sprintf("using ZFS zpool size %d", size))

			return zfsVolume.NewProvider(&zfsVolume.ProviderConfig{
				DatasetName: "flynn-default",
				Make: &zfsVolume.MakeDev{
					BackingFilename: filepath.Join(volPath, "zfs/vdev/flynn-default-zpool.vdev"),
					Size:            size,
				},
				WorkingDir: filepath.Join(volPath, "zfs"),
			})
		}
	case "mock":
		newVolProvider = func() (volume.Provider, error) { return nil, nil }
	default:
		shutdown.Fatalf("unknown volume provider: %q", volProvider)
	}
	vman := volumemanager.New(
		filepath.Join(volPath, "volumes.bolt"),
		newVolProvider,
	)
	shutdown.BeforeExit(func() { vman.CloseDB() })

	mux := logmux.New(hostID, logDir, logger.New("host.id", hostID, "component", "logmux"))

	log.Info("initializing job backend", "type", backendName)
	var backend Backend
	switch backendName {
	case "libcontainer":
		backend, err = NewLibcontainerBackend(state, vman, bridgeName, flynnInit, mux, partitionCGroups, logger.New("host.id", hostID, "component", "backend", "backend", "libcontainer"))
	case "mock":
		backend = MockBackend{}
	default:
		shutdown.Fatalf("unknown backend %q", backendName)
	}
	if err != nil {
		shutdown.Fatal(err)
	}
	backend.SetDefaultEnv("EXTERNAL_IP", externalIP)
	backend.SetDefaultEnv("LISTEN_IP", listenIP)

	var buffers host.LogBuffers
	discoverdManager := NewDiscoverdManager(backend, mux, hostID, publishAddr, tags)
	publishURL := "http://" + publishAddr
	host := &Host{
		id:  hostID,
		url: publishURL,
		status: &host.HostStatus{
			ID:      hostID,
			PID:     os.Getpid(),
			URL:     publishURL,
			Tags:    tags,
			Version: version.String(),
		},
		state:   state,
		backend: backend,
		vman:    vman,
		discMan: discoverdManager,
		log:     logger.New("host.id", hostID),

		maxJobConcurrency: maxJobConcurrency,
	}
	backend.SetHost(host)

	// restore the host status if set in the environment
	if statusEnv := os.Getenv("FLYNN_HOST_STATUS"); statusEnv != "" {
		log.Info("restoring host status from parent")
		if err := json.Unmarshal([]byte(statusEnv), &host.status); err != nil {
			log.Error("error restoring host status from parent", "err", err)
			shutdown.Fatal(err)
		}
		pid := os.Getpid()
		log.Info("setting status PID", "pid", pid)
		host.status.PID = pid
		// keep the same tags as the parent
		discoverdManager.UpdateTags(host.status.Tags)
	}

	log.Info("creating HTTP listener")
	l, err := newHTTPListener(net.JoinHostPort(listenIP, httpPort))
	if err != nil {
		log.Error("error creating HTTP listener", "err", err)
		shutdown.Fatal(err)
	}
	host.listener = l
	shutdown.BeforeExit(func() { host.Close() })

	// if we have a control socket FD, wait for a "resume" message before
	// opening state DBs and serving requests.
	var controlFD int
	if fdEnv := os.Getenv("FLYNN_CONTROL_FD"); fdEnv != "" {
		log.Info("parsing control socket file descriptor")
		controlFD, err = strconv.Atoi(fdEnv)
		if err != nil {
			log.Error("error parsing control socket file descriptor", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("waiting for resume message from parent")
		msg := make([]byte, len(ControlMsgResume))
		if _, err := syscall.Read(controlFD, msg); err != nil {
			log.Error("error waiting for resume message from parent", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("validating resume message")
		if !bytes.Equal(msg, ControlMsgResume) {
			log.Error(fmt.Sprintf("unexpected resume message from parent: %v", msg))
			shutdown.ExitWithCode(1)
		}

		log.Info("receiving log buffers from parent")
		if err := json.NewDecoder(&controlSock{controlFD}).Decode(&buffers); err != nil {
			log.Error("error receiving log buffers from parent", "err", err)
			shutdown.Fatal(err)
		}
	}

	log.Info("opening state databases")
	if err := host.OpenDBs(); err != nil {
		log.Error("error opening state databases", "err", err)
		shutdown.Fatal(err)
	}

	// stopJobs stops all jobs, leaving discoverd until the end so other
	// jobs can unregister themselves on shutdown.
	stopJobs := func() (err error) {
		var except []string
		host.statusMtx.RLock()
		if host.status.Discoverd != nil && host.status.Discoverd.JobID != "" {
			except = []string{host.status.Discoverd.JobID}
		}
		host.statusMtx.RUnlock()
		log.Info("stopping all jobs except discoverd")
		if err := backend.Cleanup(except); err != nil {
			log.Error("error stopping all jobs except discoverd", "err", err)
			return err
		}
		for _, id := range except {
			log.Info("stopping discoverd")
			if e := backend.Stop(id); e != nil {
				log.Error("error stopping discoverd", "err", err)
				err = e
			}
		}
		return
	}

	log.Info("restoring state")
	resurrect, err := state.Restore(backend, buffers)
	if err != nil {
		log.Error("error restoring state", "err", err)
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() {
		// close discoverd before stopping jobs so we can unregister first
		log.Info("unregistering with service discovery")
		if err := discoverdManager.Close(); err != nil {
			log.Error("error unregistering with service discovery", "err", err)
		}
		stopJobs()
	})

	log.Info("serving HTTP requests")
	host.ServeHTTP()

	if controlFD > 0 {
		// now that we are serving requests, send an "ok" message to the parent
		log.Info("sending ok message to parent")
		if _, err := syscall.Write(controlFD, ControlMsgOK); err != nil {
			log.Error("error sending ok message to parent", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("closing control socket")
		if err := syscall.Close(controlFD); err != nil {
			log.Error("error closing control socket", "err", err)
		}
	}

	if force {
		log.Info("forcibly stopping existing jobs")
		if err := stopJobs(); err != nil {
			log.Error("error forcibly stopping existing jobs", "err", err)
			shutdown.Fatal(err)
		}
	}

	if discoveryToken != "" {
		log.Info("getting cluster peer IPs")
		instances, err := discovery.GetCluster(discoveryToken)
		if err != nil {
			// TODO(titanous): retry?
			log.Error("error getting discovery cluster", "err", err)
			shutdown.Fatal(err)
		}
		peerIPs = make([]string, 0, len(instances))
		for _, inst := range instances {
			u, err := url.Parse(inst.URL)
			if err != nil {
				continue
			}
			ip, _, err := net.SplitHostPort(u.Host)
			if err != nil || ip == externalIP {
				continue
			}
			peerIPs = append(peerIPs, ip)
		}
		log.Info("got cluster peer IPs", "peers", peerIPs)
	}
	log.Info("connecting to cluster peers")
	if err := discoverdManager.ConnectPeer(peerIPs); err != nil {
		log.Info("no cluster peers available")
	}

	if !args.Bool["--no-resurrect"] {
		log.Info("resurrecting jobs")
		resurrect()
	}

	monitor := NewMonitor(host.discMan, externalIP, logger)
	shutdown.BeforeExit(func() { monitor.Shutdown() })
	go monitor.Run()

	log.Info("blocking main goroutine")
	<-make(chan struct{})
}
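
The resume handshake near the end of runDaemon is the child side of the control-socket protocol described in Example #4: the parent creates the pair via socketpair(2), keeps one end, and hands the other to the child, which does a fixed-size read for the resume message and then decodes the log buffers. A minimal sketch of that exchange in a single process (the literal message bytes are illustrative; the example does not show the actual ControlMsgResume value):

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Connected pair of unix sockets, as in "socketpair(2)" from the
	// Update doc comment. fds[0] plays the parent, fds[1] the child
	// (which would normally inherit its end as FD 4 via ExtraFiles).
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fds[0])
	defer syscall.Close(fds[1])

	resume := []byte("resume") // illustrative; not the real ControlMsgResume
	if _, err := syscall.Write(fds[0], resume); err != nil {
		panic(err)
	}

	// Child side: fixed-size read, mirroring the syscall.Read in runDaemon.
	msg := make([]byte, len(resume))
	if _, err := syscall.Read(fds[1], msg); err != nil {
		panic(err)
	}
	fmt.Printf("child received %q\n", msg)
}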
Example #11
func main() {
	serviceName := os.Getenv("FLYNN_MONGO")
	if serviceName == "" {
		serviceName = "mongodb"
	}
	singleton := os.Getenv("SINGLETON") == "true"
	password := os.Getenv("MONGO_PWD")
	httpPort := os.Getenv("HTTP_PORT")
	ip := os.Getenv("EXTERNAL_IP")
	if httpPort == "" {
		httpPort = "27018"
	}
	serverId := ipToId(net.ParseIP(ip))

	const dataDir = "/data"
	idFile := filepath.Join(dataDir, "instance_id")
	idBytes, err := ioutil.ReadFile(idFile)
	if err != nil && !os.IsNotExist(err) {
		shutdown.Fatalf("error reading instance ID: %s", err)
	}
	id := string(idBytes)
	if len(id) == 0 {
		id = random.UUID()
		if err := ioutil.WriteFile(idFile, []byte(id), 0644); err != nil {
			shutdown.Fatalf("error writing instance ID: %s", err)
		}
	}

	keyFile := filepath.Join(dataDir, "Keyfile")
	if err := ioutil.WriteFile(keyFile, []byte(password), 0600); err != nil {
		shutdown.Fatalf("error writing keyfile: %s", err)
	}

	err = discoverd.DefaultClient.AddService(serviceName, &discoverd.ServiceConfig{
		LeaderType: discoverd.LeaderTypeManual,
	})
	if err != nil && !httphelper.IsObjectExistsError(err) {
		shutdown.Fatal(err)
	}
	inst := &discoverd.Instance{
		Addr: ":" + mongodb.DefaultPort,
		Meta: map[string]string{mongoIdKey: id},
	}
	hb, err := discoverd.DefaultClient.RegisterInstance(serviceName, inst)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	log := log15.New("app", "mongodb")

	process := mongodb.NewProcess()
	process.Password = password
	process.Singleton = singleton
	process.ServerID = serverId
	process.Host = ip

	dd := sd.NewDiscoverd(discoverd.DefaultClient.Service(serviceName), log.New("component", "discoverd"))

	peer := state.NewPeer(inst, id, mongoIdKey, singleton, dd, process, log.New("component", "peer"))
	shutdown.BeforeExit(func() { peer.Close() })

	go peer.Run()

	handler := mongodb.NewHandler()
	handler.Process = process
	handler.Peer = peer
	handler.Heartbeater = hb
	handler.Logger = log.New("component", "http")

	shutdown.Fatal(http.ListenAndServe(":"+httpPort, handler))
}
Example #12
func main() {
	defer shutdown.Exit()

	usage := `usage: flynn-host [-h|--help] [--version] <command> [<args>...]

Options:
  -h, --help                 Show this message
  --version                  Show current version

Commands:
  help                       Show usage for a specific command
  init                       Create cluster configuration for daemon
  daemon                     Start the daemon
  update                     Update Flynn components
  download                   Download container images
  bootstrap                  Bootstrap layer 1
  inspect                    Get low-level information about a job
  log                        Get the logs of a job
  ps                         List jobs
  stop                       Stop running jobs
  signal                     Signal a job
  destroy-volumes            Destroys the local volume database
  collect-debug-info         Collect debug information into an anonymous gist or tarball
  list                       Lists ID and IP of each host
  version                    Show current version

See 'flynn-host help <command>' for more information on a specific command.
`

	args, _ := docopt.Parse(usage, nil, true, version.String(), true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn help`
			fmt.Println(usage)
			return
		} else { // `flynn help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if cmd == "daemon" {
		// merge in args and env from config file, if available
		var c *config.Config
		if n := os.Getenv("FLYNN_HOST_CONFIG"); n != "" {
			var err error
			c, err = config.Open(n)
			if err != nil {
				shutdown.Fatalf("error opening config file %s: %s", n, err)
			}
		} else {
			var err error
			c, err = config.Open(configFile)
			if err != nil && !os.IsNotExist(err) {
				shutdown.Fatalf("error opening config file %s: %s", configFile, err)
			}
			if c == nil {
				c = &config.Config{}
			}
		}
		cmdArgs = append(cmdArgs, c.Args...)
		for k, v := range c.Env {
			os.Setenv(k, v)
		}
	}

	if err := cli.Run(cmd, cmdArgs); err != nil {
		if err == cli.ErrInvalidCommand {
			fmt.Printf("ERROR: %q is not a valid command\n\n", cmd)
			fmt.Println(usage)
			shutdown.ExitWithCode(1)
		}
		shutdown.Fatal(err)
	}
}
Example #13
func runExport(args *docopt.Args, client controller.Client) error {
	var dest io.Writer = os.Stdout
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Create(filename)
		if err != nil {
			return fmt.Errorf("error creating export file: %s", err)
		}
		defer f.Close()
		dest = f
	}

	app, err := client.GetApp(mustApp())
	if err != nil {
		return fmt.Errorf("error getting app: %s", err)
	}

	var bar backup.ProgressBar
	if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) {
		b := pb.New(0)
		b.SetUnits(pb.U_BYTES)
		b.ShowBar = false
		b.ShowSpeed = true
		b.Output = os.Stderr
		b.Start()
		defer b.Finish()
		bar = b
	}

	tw := backup.NewTarWriter(app.Name, dest, bar)
	defer tw.Close()

	if err := tw.WriteJSON("app.json", app); err != nil {
		return fmt.Errorf("error exporting app: %s", err)
	}

	routes, err := client.RouteList(mustApp())
	if err != nil {
		return fmt.Errorf("error getting routes: %s", err)
	}
	if err := tw.WriteJSON("routes.json", routes); err != nil {
		return fmt.Errorf("error exporting routes: %s", err)
	}

	release, err := client.GetAppRelease(mustApp())
	if err == controller.ErrNotFound {
		// if the app has no release then there is nothing more to export
		return nil
	} else if err != nil {
		return fmt.Errorf("error retrieving app release: %s", err)
	}

	// Do not allow the exporting of passwords.
	delete(release.Env, "REDIS_PASSWORD")

	if err := tw.WriteJSON("release.json", release); err != nil {
		return fmt.Errorf("error exporting release: %s", err)
	}

	if artifactID := release.ImageArtifactID(); artifactID != "" {
		artifact, err := client.GetArtifact(artifactID)
		if err != nil && err != controller.ErrNotFound {
			return fmt.Errorf("error retrieving artifact: %s", err)
		} else if err == nil {
			if err := tw.WriteJSON("artifact.json", artifact); err != nil {
				return fmt.Errorf("error exporting artifact: %s", err)
			}
		}
	}

	formation, err := client.GetFormation(mustApp(), release.ID)
	if err != nil && err != controller.ErrNotFound {
		return fmt.Errorf("error retrieving formation: %s", err)
	} else if err == nil {
		if err := tw.WriteJSON("formation.json", formation); err != nil {
			return fmt.Errorf("error exporting formation: %s", err)
		}
	}

	// expect releases deployed via git to have a slug as their first file
	// artifact, and legacy releases to have SLUG_URL set
	var slugURL string
	if release.IsGitDeploy() && len(release.FileArtifactIDs()) > 0 {
		slugArtifact, err := client.GetArtifact(release.FileArtifactIDs()[0])
		if err != nil && err != controller.ErrNotFound {
			return fmt.Errorf("error retrieving slug artifact: %s", err)
		} else if err == nil {
			slugURL = slugArtifact.URI
		}
	} else if u, ok := release.Env["SLUG_URL"]; ok {
		slugURL = u
	}
	if slugURL != "" {
		reqR, reqW := io.Pipe()
		config := runConfig{
			App:        mustApp(),
			Release:    release.ID,
			DisableLog: true,
			Entrypoint: []string{"curl"},
			Args:       []string{"--include", "--raw", slugURL},
			Stdout:     reqW,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdout = io.MultiWriter(config.Stdout, bar)
		}
		go func() {
			if err := runJob(client, config); err != nil {
				shutdown.Fatalf("error retrieving slug: %s", err)
			}
		}()
		res, err := http.ReadResponse(bufio.NewReader(reqR), nil)
		if err != nil {
			return fmt.Errorf("error reading slug response: %s", err)
		}
		if res.StatusCode != 200 {
			return fmt.Errorf("unexpected status getting slug: %d", res.StatusCode)
		}
		length, err := strconv.Atoi(res.Header.Get("Content-Length"))
		if err != nil {
			return fmt.Errorf("slug has missing or malformed Content-Length")
		}

		if err := tw.WriteHeader("slug.tar.gz", length); err != nil {
			return fmt.Errorf("error writing slug header: %s", err)
		}
		if _, err := io.Copy(tw, res.Body); err != nil {
			return fmt.Errorf("error writing slug: %s", err)
		}
		res.Body.Close()
	}

	if pgConfig, err := getAppPgRunConfig(client); err == nil {
		configPgDump(pgConfig)
		if err := tw.WriteCommandOutput(client, "postgres.dump", pgConfig.App, &ct.NewJob{
			ReleaseID:  pgConfig.Release,
			Entrypoint: pgConfig.Entrypoint,
			Cmd:        pgConfig.Args,
			Env:        pgConfig.Env,
			DisableLog: pgConfig.DisableLog,
		}); err != nil {
			return fmt.Errorf("error creating postgres dump: %s", err)
		}
	}

	if mysqlConfig, err := getAppMysqlRunConfig(client); err == nil {
		configMysqlDump(mysqlConfig)
		if err := tw.WriteCommandOutput(client, "mysql.dump", mysqlConfig.App, &ct.NewJob{
			ReleaseID:  mysqlConfig.Release,
			Entrypoint: mysqlConfig.Entrypoint,
			Cmd:        mysqlConfig.Args,
			Env:        mysqlConfig.Env,
			DisableLog: mysqlConfig.DisableLog,
		}); err != nil {
			return fmt.Errorf("error creating mysql dump: %s", err)
		}
	}

	return nil
}