func runPgRestore(args *docopt.Args, client *controller.Client, config *runConfig) error { config.Stdin = os.Stdin var size int64 if filename := args.String["--file"]; filename != "" { f, err := os.Open(filename) if err != nil { return err } defer f.Close() stat, err := f.Stat() if err != nil { return err } size = stat.Size() config.Stdin = f } if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) { bar := pb.New(0) bar.SetUnits(pb.U_BYTES) if size > 0 { bar.Total = size } else { bar.ShowBar = false } bar.ShowSpeed = true bar.Output = os.Stderr bar.Start() defer bar.Finish() config.Stdin = bar.NewProxyReader(config.Stdin) } return pgRestore(client, config) }
func runRedisDump(args *docopt.Args, client controller.Client, config *runConfig) error { config.Stdout = os.Stdout if filename := args.String["--file"]; filename != "" { f, err := os.Create(filename) if err != nil { return err } defer f.Close() config.Stdout = f } if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) { bar := pb.New(0) bar.SetUnits(pb.U_BYTES) bar.ShowBar = false bar.ShowSpeed = true bar.Output = os.Stderr bar.Start() defer bar.Finish() config.Stdout = io.MultiWriter(config.Stdout, bar) } config.Entrypoint = []string{"/bin/dump-flynn-redis"} return runJob(client, *config) }
func (g *Gist) Upload(log log15.Logger) error { if len(g.Files) == 0 { return errors.New("cannot create empty gist") } payload, err := json.Marshal(g) if err != nil { log.Error("error preparing gist content", "err", err) return err } var body io.Reader = bytes.NewReader(payload) if term.IsTerminal(os.Stderr.Fd()) { bar := pb.New(len(payload)) bar.SetUnits(pb.U_BYTES) bar.ShowSpeed = true bar.Output = os.Stderr bar.Start() defer bar.Finish() body = bar.NewProxyReader(body) } req, err := http.NewRequest("POST", "https://api.github.com/gists", body) if err != nil { log.Error("error preparing HTTP request", "err", err) return err } req.Header.Set("Content-Type", "application/json") log.Info("creating anonymous gist") res, err := http.DefaultClient.Do(req) if err != nil { log.Error("error uploading gist content", "err", err) return err } defer res.Body.Close() if res.StatusCode != http.StatusCreated { e := fmt.Sprintf("unexpected HTTP status: %d", res.StatusCode) log.Error(e) return errors.New(e) } if err := json.NewDecoder(res.Body).Decode(g); err != nil { log.Error("error decoding HTTP response", "err", err) return err } return nil }
func runClusterBackup(args *docopt.Args) error { client, err := getClusterClient() if err != nil { return err } var bar *pb.ProgressBar if term.IsTerminal(os.Stderr.Fd()) { bar = pb.New(0) bar.SetUnits(pb.U_BYTES) bar.ShowBar = false bar.ShowSpeed = true bar.Output = os.Stderr bar.Start() } var dest io.Writer = os.Stdout if filename := args.String["--file"]; filename != "" { f, err := os.Create(filename) if err != nil { return err } defer f.Close() dest = f } fmt.Fprintln(os.Stderr, "Creating cluster backup...") if bar != nil { dest = io.MultiWriter(dest, bar) } if err := backup.Run(client, dest); err != nil { return err } if bar != nil { bar.Finish() } fmt.Fprintln(os.Stderr, "Backup complete.") return nil }
// runRun executes <command> with <argument>... inside a one-off job running
// the given docker <image> on the cluster (optionally pinned to --host).
// A TTY is allocated only when both stdin and stdout are terminals. The
// call blocks until the job exits; if the job exits non-zero this process
// exits with the same status code via os.Exit.
func runRun(args *docopt.Args, client *cluster.Client) error {
	cmd := exec.Cmd{
		Artifact: exec.DockerImage(args.String["<image>"]),
		Job: &host.Job{
			Config: host.ContainerConfig{
				Entrypoint: []string{args.String["<command>"]},
				Cmd:        args.All["<argument>"].([]string),
				TTY:        term.IsTerminal(os.Stdin.Fd()) && term.IsTerminal(os.Stdout.Fd()),
				Stdin:      true,
			},
		},
		HostID: args.String["--host"],
		Stdin:  os.Stdin,
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	}
	if cmd.Job.Config.TTY {
		// Propagate the local terminal size and TERM so full-screen programs
		// render correctly inside the job.
		ws, err := term.GetWinsize(os.Stdin.Fd())
		if err != nil {
			return err
		}
		cmd.TermHeight = ws.Height
		cmd.TermWidth = ws.Width
		cmd.Env = map[string]string{
			"COLUMNS": strconv.Itoa(int(ws.Width)),
			"LINES":   strconv.Itoa(int(ws.Height)),
			"TERM":    os.Getenv("TERM"),
		}
	}
	// --bind takes comma-separated target:location pairs mounted writeable
	// into the container.
	if specs := args.String["--bind"]; specs != "" {
		mounts := strings.Split(specs, ",")
		cmd.Job.Config.Mounts = make([]host.Mount, len(mounts))
		for i, m := range mounts {
			// NOTE(review): a spec without a ":" yields len(s) == 1 and s[1]
			// below would panic — assumes callers always pass target:location.
			s := strings.SplitN(m, ":", 2)
			cmd.Job.Config.Mounts[i] = host.Mount{
				Target:    s[0],
				Location:  s[1],
				Writeable: true,
			}
		}
	}
	var termState *term.State
	if cmd.Job.Config.TTY {
		// Put the local terminal into raw mode so keystrokes pass through
		// to the remote TTY unmodified.
		var err error
		termState, err = term.MakeRaw(os.Stdin.Fd())
		if err != nil {
			return err
		}
		// Restore the terminal if we return without calling os.Exit
		defer term.RestoreTerminal(os.Stdin.Fd(), termState)
		// Forward local terminal resizes to the remote TTY.
		go func() {
			ch := make(chan os.Signal, 1)
			signal.Notify(ch, syscall.SIGWINCH)
			for range ch {
				ws, err := term.GetWinsize(os.Stdin.Fd())
				if err != nil {
					return
				}
				cmd.ResizeTTY(ws.Height, ws.Width)
				cmd.Signal(int(syscall.SIGWINCH))
			}
		}()
	}
	// Relay the first SIGINT/SIGTERM to the job; escalate to SIGKILL if it
	// has not exited within 10 seconds.
	go func() {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
		sig := <-ch
		cmd.Signal(int(sig.(syscall.Signal)))
		time.Sleep(10 * time.Second)
		cmd.Signal(int(syscall.SIGKILL))
	}()
	err := cmd.Run()
	if status, ok := err.(exec.ExitError); ok {
		if cmd.Job.Config.TTY {
			// The deferred restore doesn't happen due to the exit below
			term.RestoreTerminal(os.Stdin.Fd(), termState)
		}
		// Mirror the job's exit status in this process.
		os.Exit(int(status))
	}
	return err
}
func runExport(args *docopt.Args, client *controller.Client) error { var dest io.Writer = os.Stdout if filename := args.String["--file"]; filename != "" { f, err := os.Create(filename) if err != nil { return fmt.Errorf("error creating export file: %s", err) } defer f.Close() dest = f } app, err := client.GetApp(mustApp()) if err != nil { return fmt.Errorf("error getting app: %s", err) } tw := backup.NewTarWriter(app.Name, dest) defer tw.Close() if err := tw.WriteJSON("app.json", app); err != nil { return fmt.Errorf("error exporting app: %s", err) } routes, err := client.RouteList(mustApp()) if err != nil { return fmt.Errorf("error getting routes: %s", err) } if err := tw.WriteJSON("routes.json", routes); err != nil { return fmt.Errorf("error exporting routes: %s", err) } release, err := client.GetAppRelease(mustApp()) if err != nil && err != controller.ErrNotFound { return fmt.Errorf("error retrieving app: %s", err) } else if err == nil { // Do not allow the exporting of passwords. delete(release.Env, "REDIS_PASSWORD") if err := tw.WriteJSON("release.json", release); err != nil { return fmt.Errorf("error exporting release: %s", err) } } artifact, err := client.GetArtifact(release.ArtifactID) if err != nil && err != controller.ErrNotFound { return fmt.Errorf("error retrieving artifact: %s", err) } else if err == nil { if err := tw.WriteJSON("artifact.json", artifact); err != nil { return fmt.Errorf("error exporting artifact: %s", err) } } formation, err := client.GetFormation(mustApp(), release.ID) if err != nil && err != controller.ErrNotFound { return fmt.Errorf("error retrieving formation: %s", err) } else if err == nil { if err := tw.WriteJSON("formation.json", formation); err != nil { return fmt.Errorf("error exporting formation: %s", err) } } var bar *pb.ProgressBar if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) { bar = pb.New(0) bar.SetUnits(pb.U_BYTES) bar.ShowBar = false bar.ShowSpeed = true bar.Output = os.Stderr bar.Start() defer bar.Finish() 
} if slug, ok := release.Env["SLUG_URL"]; ok { reqR, reqW := io.Pipe() config := runConfig{ App: mustApp(), Release: release.ID, DisableLog: true, Entrypoint: []string{"curl"}, Args: []string{"--include", "--raw", slug}, Stdout: reqW, Stderr: ioutil.Discard, } if bar != nil { config.Stdout = io.MultiWriter(config.Stdout, bar) } go func() { if err := runJob(client, config); err != nil { shutdown.Fatalf("error retrieving slug: %s", err) } }() res, err := http.ReadResponse(bufio.NewReader(reqR), nil) if err != nil { return fmt.Errorf("error reading slug response: %s", err) } if res.StatusCode != 200 { return fmt.Errorf("unexpected status getting slug: %d", res.StatusCode) } length, err := strconv.Atoi(res.Header.Get("Content-Length")) if err != nil { return fmt.Errorf("slug has missing or malformed Content-Length") } if err := tw.WriteHeader("slug.tar.gz", length); err != nil { return fmt.Errorf("error writing slug header: %s", err) } if _, err := io.Copy(tw, res.Body); err != nil { return fmt.Errorf("error writing slug: %s", err) } res.Body.Close() } if config, err := getAppPgRunConfig(client); err == nil { configPgDump(config) if err := tw.WriteCommandOutput(client, "postgres.dump", config.App, &ct.NewJob{ ReleaseID: config.Release, Entrypoint: config.Entrypoint, Cmd: config.Args, Env: config.Env, DisableLog: config.DisableLog, }); err != nil { return fmt.Errorf("error creating postgres dump: %s", err) } } return nil }
func runImport(args *docopt.Args, client *controller.Client) error { var src io.Reader = os.Stdin if filename := args.String["--file"]; filename != "" { f, err := os.Open(filename) if err != nil { return fmt.Errorf("error opening export file: %s", err) } defer f.Close() src = f } tr := tar.NewReader(src) var ( app *ct.App release *ct.Release artifact *ct.Artifact formation *ct.Formation routes []router.Route slug io.Reader pgDump io.Reader uploadSize int64 ) numResources := 0 numRoutes := 1 for { header, err := tr.Next() if err == io.EOF { break } else if err != nil { return fmt.Errorf("error reading export tar: %s", err) } switch path.Base(header.Name) { case "app.json": app = &ct.App{} if err := json.NewDecoder(tr).Decode(app); err != nil { return fmt.Errorf("error decoding app: %s", err) } app.ID = "" case "release.json": release = &ct.Release{} if err := json.NewDecoder(tr).Decode(release); err != nil { return fmt.Errorf("error decoding release: %s", err) } release.ID = "" release.ArtifactID = "" case "artifact.json": artifact = &ct.Artifact{} if err := json.NewDecoder(tr).Decode(artifact); err != nil { return fmt.Errorf("error decoding artifact: %s", err) } artifact.ID = "" case "formation.json": formation = &ct.Formation{} if err := json.NewDecoder(tr).Decode(formation); err != nil { return fmt.Errorf("error decoding formation: %s", err) } formation.AppID = "" formation.ReleaseID = "" case "routes.json": if err := json.NewDecoder(tr).Decode(&routes); err != nil { return fmt.Errorf("error decoding routes: %s", err) } for _, route := range routes { route.ID = "" route.ParentRef = "" } case "slug.tar.gz": f, err := ioutil.TempFile("", "slug.tar.gz") if err != nil { return fmt.Errorf("error creating slug tempfile: %s", err) } defer f.Close() defer os.Remove(f.Name()) if _, err := io.Copy(f, tr); err != nil { return fmt.Errorf("error reading slug: %s", err) } if _, err := f.Seek(0, os.SEEK_SET); err != nil { return fmt.Errorf("error seeking slug tempfile: %s", 
err) } slug = f uploadSize += header.Size case "postgres.dump": f, err := ioutil.TempFile("", "postgres.dump") if err != nil { return fmt.Errorf("error creating db tempfile: %s", err) } defer f.Close() defer os.Remove(f.Name()) if _, err := io.Copy(f, tr); err != nil { return fmt.Errorf("error reading db dump: %s", err) } if _, err := f.Seek(0, os.SEEK_SET); err != nil { return fmt.Errorf("error seeking db tempfile: %s", err) } pgDump = f uploadSize += header.Size } } if app == nil { return fmt.Errorf("missing app.json") } oldName := app.Name if name := args.String["--name"]; name != "" { app.Name = name } if err := client.CreateApp(app); err != nil { return fmt.Errorf("error creating app: %s", err) } var bar *pb.ProgressBar if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) { bar = pb.New(0) bar.SetUnits(pb.U_BYTES) bar.Total = uploadSize bar.ShowSpeed = true bar.Output = os.Stderr bar.Start() defer bar.Finish() } if pgDump != nil && release != nil { res, err := client.ProvisionResource(&ct.ResourceReq{ ProviderID: "postgres", Apps: []string{app.ID}, }) if err != nil { return fmt.Errorf("error provisioning postgres resource: %s", err) } numResources++ if release.Env == nil { release.Env = make(map[string]string, len(res.Env)) } for k, v := range res.Env { release.Env[k] = v } config, err := getPgRunConfig(client, app.ID, release) if err != nil { return fmt.Errorf("error getting postgres config: %s", err) } config.Stdin = pgDump if bar != nil { config.Stdin = bar.NewProxyReader(config.Stdin) } config.Exit = false if err := pgRestore(client, config); err != nil { return fmt.Errorf("error restoring postgres database: %s", err) } } if release != nil && release.Env["FLYNN_REDIS"] != "" { res, err := client.ProvisionResource(&ct.ResourceReq{ ProviderID: "redis", Apps: []string{app.ID}, }) if err != nil { return fmt.Errorf("error provisioning redis resource: %s", err) } numResources++ if release.Env == nil { release.Env = 
make(map[string]string, len(res.Env)) } for k, v := range res.Env { release.Env[k] = v } } uploadSlug := release != nil && release.Env["SLUG_URL"] != "" && artifact != nil && slug != nil if uploadSlug { // Use current slugrunner as the artifact gitreceiveRelease, err := client.GetAppRelease("gitreceive") if err != nil { return fmt.Errorf("unable to retrieve gitreceive release: %s", err) } artifact = &ct.Artifact{ Type: "docker", URI: gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"], } if artifact.URI == "" { return fmt.Errorf("gitreceive env missing SLUGRUNNER_IMAGE_URI") } release.Env["SLUG_URL"] = fmt.Sprintf("http://blobstore.discoverd/%s.tgz", random.UUID()) } if artifact != nil { if err := client.CreateArtifact(artifact); err != nil { return fmt.Errorf("error creating artifact: %s", err) } release.ArtifactID = artifact.ID } if release != nil { for t, proc := range release.Processes { for i, port := range proc.Ports { if port.Service != nil && port.Service.Name == oldName+"-web" { proc.Ports[i].Service.Name = app.Name + "-web" } } release.Processes[t] = proc } if err := client.CreateRelease(release); err != nil { return fmt.Errorf("error creating release: %s", err) } if err := client.SetAppRelease(app.ID, release.ID); err != nil { return fmt.Errorf("error setting app release: %s", err) } } if uploadSlug { config := runConfig{ App: app.ID, Release: release.ID, DisableLog: true, Entrypoint: []string{"curl"}, Args: []string{"--request", "PUT", "--upload-file", "-", release.Env["SLUG_URL"]}, Stdin: slug, Stdout: ioutil.Discard, Stderr: ioutil.Discard, } if bar != nil { config.Stdin = bar.NewProxyReader(config.Stdin) } if err := runJob(client, config); err != nil { return fmt.Errorf("error uploading slug: %s", err) } } if formation != nil && release != nil { formation.ReleaseID = release.ID formation.AppID = app.ID if err := client.PutFormation(formation); err != nil { return fmt.Errorf("error creating formation: %s", err) } } if args.Bool["--routes"] { for _, route := 
range routes { if err := client.CreateRoute(app.ID, &route); err != nil { if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode { // If the cluster domain matches then the default route // exported will conflict with the one created automatically. continue } return fmt.Errorf("error creating route: %s", err) } numRoutes++ } } fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources) return nil }
func runExport(args *docopt.Args, client *controller.Client) error { var dest io.Writer = os.Stdout if filename := args.String["--file"]; filename != "" { f, err := os.Create(filename) if err != nil { return fmt.Errorf("error creating export file: %s", err) } defer f.Close() dest = f } tw := tar.NewWriter(dest) defer tw.Close() uid := syscall.Getuid() header := func(name string, length int) error { return tw.WriteHeader(&tar.Header{ Name: path.Join(mustApp(), name), Mode: 0644, Size: int64(length), ModTime: time.Now(), Typeflag: tar.TypeReg, Uid: uid, Gid: uid, }) } writeJSON := func(name string, v interface{}) error { data, err := json.MarshalIndent(v, "", " ") if err != nil { return err } if err := header(name, len(data)+1); err != nil { return err } if _, err := tw.Write(data); err != nil { return err } if _, err := tw.Write([]byte("\n")); err != nil { return err } return nil } app, err := client.GetApp(mustApp()) if err != nil { return fmt.Errorf("error getting app: %s", err) } if err := writeJSON("app.json", app); err != nil { return fmt.Errorf("error exporting app: %s", err) } release, err := client.GetAppRelease(mustApp()) if err != nil && err != controller.ErrNotFound { return fmt.Errorf("error retrieving app: %s", err) } else if err == nil { if err := writeJSON("release.json", release); err != nil { return fmt.Errorf("error exporting release: %s", err) } } artifact, err := client.GetArtifact(release.ArtifactID) if err != nil && err != controller.ErrNotFound { return fmt.Errorf("error retrieving artifact: %s", err) } else if err == nil { if err := writeJSON("artifact.json", artifact); err != nil { return fmt.Errorf("error exporting artifact: %s", err) } } formation, err := client.GetFormation(mustApp(), release.ID) if err != nil && err != controller.ErrNotFound { return fmt.Errorf("error retrieving formation: %s", err) } else if err == nil { if err := writeJSON("formation.json", formation); err != nil { return fmt.Errorf("error exporting formation: %s", 
err) } } var bar *pb.ProgressBar if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) { bar = pb.New(0) bar.SetUnits(pb.U_BYTES) bar.ShowBar = false bar.ShowSpeed = true bar.Output = os.Stderr bar.Start() defer bar.Finish() } if slug, ok := release.Env["SLUG_URL"]; ok { reqR, reqW := io.Pipe() config := runConfig{ App: mustApp(), Release: release.ID, DisableLog: true, Entrypoint: []string{"curl"}, Args: []string{"--include", "--raw", slug}, Stdout: reqW, Stderr: ioutil.Discard, } if bar != nil { config.Stdout = io.MultiWriter(config.Stdout, bar) } go func() { if err := runJob(client, config); err != nil { shutdown.Fatalf("error retrieving slug: %s", err) } }() res, err := http.ReadResponse(bufio.NewReader(reqR), nil) if err != nil { return fmt.Errorf("error reading slug response: %s", err) } if res.StatusCode != 200 { return fmt.Errorf("unexpected status getting slug: %d", res.StatusCode) } length, err := strconv.Atoi(res.Header.Get("Content-Length")) if err != nil { return fmt.Errorf("slug has missing or malformed Content-Length") } if err := header("slug.tar.gz", length); err != nil { return fmt.Errorf("error writing slug header: %s", err) } if _, err := io.Copy(tw, res.Body); err != nil { return fmt.Errorf("error writing slug: %s", err) } res.Body.Close() } if config, err := getAppPgRunConfig(client); err == nil { f, err := ioutil.TempFile("", "postgres.dump") if err != nil { return fmt.Errorf("error creating db temp file: %s", err) } defer f.Close() defer os.Remove(f.Name()) config.Stdout = f config.Exit = false if bar != nil { config.Stdout = io.MultiWriter(config.Stdout, bar) } if err := pgDump(client, config); err != nil { return fmt.Errorf("error dumping database: %s", err) } length, err := f.Seek(0, os.SEEK_CUR) if err != nil { return fmt.Errorf("error getting db size: %s", err) } if err := header("postgres.dump", int(length)); err != nil { return fmt.Errorf("error writing db header: %s", err) } if _, err := f.Seek(0, os.SEEK_SET); err != nil { 
return fmt.Errorf("error seeking db dump: %s", err) } if _, err := io.Copy(tw, f); err != nil { return fmt.Errorf("error exporting db: %s", err) } } return nil }
func NewDockerCli(in io.ReadCloser, out, err io.Writer, key libtrust.PrivateKey, proto, addr string, tlsConfig *tls.Config) *DockerCli { var ( inFd uintptr outFd uintptr isTerminalIn = false isTerminalOut = false scheme = "http" ) if tlsConfig != nil { scheme = "https" } if in != nil { if file, ok := in.(*os.File); ok { inFd = file.Fd() isTerminalIn = term.IsTerminal(inFd) } } if out != nil { if file, ok := out.(*os.File); ok { outFd = file.Fd() isTerminalOut = term.IsTerminal(outFd) } } if err == nil { err = out } // The transport is created here for reuse during the client session tr := &http.Transport{ TLSClientConfig: tlsConfig, } // Why 32? See issue 8035 timeout := 32 * time.Second if proto == "unix" { // no need in compressing for local communications tr.DisableCompression = true tr.Dial = func(_, _ string) (net.Conn, error) { return net.DialTimeout(proto, addr, timeout) } } else { tr.Dial = (&net.Dialer{Timeout: timeout}).Dial } return &DockerCli{ proto: proto, addr: addr, in: in, out: out, err: err, key: key, inFd: inFd, outFd: outFd, isTerminalIn: isTerminalIn, isTerminalOut: isTerminalOut, tlsConfig: tlsConfig, scheme: scheme, transport: tr, } }
// runUpdate updates a Flynn cluster from a TUF repository: it ensures the
// latest updater binary is running (re-execing via updateAndExecLatest when
// --is-latest is unset), then in parallel on every host pulls images,
// pulls binaries/config, restarts the flynn-host daemon, and finally runs
// the flynn/updater image fed with the pulled image URIs on stdin.
func runUpdate(args *docopt.Args) error {
	log := log15.New()

	// create and update a TUF client
	log.Info("initializing TUF client")
	local, err := tuf.FileLocalStore(args.String["--tuf-db"])
	if err != nil {
		log.Error("error creating local TUF client", "err", err)
		return err
	}
	remote, err := tuf.HTTPRemoteStore(args.String["--repository"], tufHTTPOpts("updater"))
	if err != nil {
		log.Error("error creating remote TUF client", "err", err)
		return err
	}
	client := tuf.NewClient(local, remote)
	// Re-exec the freshly downloaded updater before doing any work, unless
	// we already are the latest version.
	if !args.Bool["--is-latest"] {
		return updateAndExecLatest(args.String["--config-dir"], client, log)
	}

	// unlink the current binary if it is a temp file
	if args.Bool["--is-tempfile"] {
		os.Remove(os.Args[0])
	}

	// read the TUF db so we can pass it to hosts
	log.Info("reading TUF database")
	tufDB, err := ioutil.ReadFile(args.String["--tuf-db"])
	if err != nil {
		log.Error("error reading the TUF database", "err", err)
		return err
	}

	log.Info("getting host list")
	clusterClient := cluster.NewClient()
	hosts, err := clusterClient.Hosts()
	if err != nil {
		log.Error("error getting host list", "err", err)
		return err
	}
	if len(hosts) == 0 {
		return errors.New("no hosts found")
	}

	log.Info(fmt.Sprintf("updating %d hosts", len(hosts)))

	// eachHost invokes the given function in a goroutine for each host,
	// returning an error if any of the functions returns an error.
	eachHost := func(f func(*cluster.Host, log15.Logger) error) (err error) {
		errs := make(chan error)
		for _, h := range hosts {
			go func(host *cluster.Host) {
				log := log.New("host", host.ID())
				errs <- f(host, log)
			}(h)
		}
		// Drain all results; the last non-nil error wins.
		for range hosts {
			if e := <-errs; e != nil {
				err = e
			}
		}
		return
	}

	// mtx guards the maps written from the per-host goroutines below.
	var mtx sync.Mutex
	images := make(map[string]string)
	log.Info("pulling latest images on all hosts")
	if err := eachHost(func(host *cluster.Host, log log15.Logger) error {
		log.Info("pulling images")
		ch := make(chan *layer.PullInfo)
		stream, err := host.PullImages(
			args.String["--repository"],
			args.String["--driver"],
			args.String["--root"],
			version.String(),
			bytes.NewReader(tufDB),
			ch,
		)
		if err != nil {
			log.Error("error pulling images", "err", err)
			return err
		}
		defer stream.Close()
		for info := range ch {
			// Individual layers are progress noise; only record whole images.
			if info.Type == layer.TypeLayer {
				continue
			}
			log.Info("pulled image", "name", info.Repo)
			imageURI := fmt.Sprintf("%s?name=%s&id=%s", args.String["--repository"], info.Repo, info.ID)
			mtx.Lock()
			images[info.Repo] = imageURI
			mtx.Unlock()
		}
		if err := stream.Err(); err != nil {
			log.Error("error pulling images", "err", err)
			return err
		}
		return nil
	}); err != nil {
		return err
	}

	var binaries map[string]string
	log.Info("pulling latest binaries and config on all hosts")
	if err := eachHost(func(host *cluster.Host, log log15.Logger) error {
		log.Info("pulling binaries and config")
		paths, err := host.PullBinariesAndConfig(
			args.String["--repository"],
			args.String["--bin-dir"],
			args.String["--config-dir"],
			version.String(),
			bytes.NewReader(tufDB),
		)
		if err != nil {
			log.Error("error pulling binaries and config", "err", err)
			return err
		}
		// NOTE(review): binaries is overwritten by each host; assumes every
		// host reports the same paths — confirm.
		mtx.Lock()
		binaries = paths
		mtx.Unlock()
		log.Info("binaries and config pulled successfully")
		return nil
	}); err != nil {
		return err
	}

	log.Info("validating binaries")
	flynnHost, ok := binaries["flynn-host"]
	if !ok {
		return fmt.Errorf("missing flynn-host binary")
	}
	flynnInit, ok := binaries["flynn-init"]
	if !ok {
		return fmt.Errorf("missing flynn-init binary")
	}
	flynnNSUmount, ok := binaries["flynn-nsumount"]
	if !ok {
		return fmt.Errorf("missing flynn-nsumount binary")
	}

	log.Info("updating flynn-host daemon on all hosts")
	if err := eachHost(func(host *cluster.Host, log log15.Logger) error {
		// TODO(lmars): handle daemons using custom flags (e.g. --state=/foo)
		_, err := host.Update(
			flynnHost,
			"daemon",
			"--id", host.ID(),
			"--flynn-init", flynnInit,
			"--nsumount", flynnNSUmount,
		)
		if err != nil {
			log.Error("error updating binaries", "err", err)
			return err
		}
		log.Info("flynn-host updated successfully")
		return nil
	}); err != nil {
		return err
	}

	updaterImage, ok := images["flynn/updater"]
	if !ok {
		e := "missing flynn/updater image"
		log.Error(e)
		return errors.New(e)
	}
	imageJSON, err := json.Marshal(images)
	if err != nil {
		log.Error("error encoding images", "err", err)
		return err
	}

	// use a flag to determine whether to use a TTY log formatter because actually
	// assigning a TTY to the job causes reading images via stdin to fail.
	cmd := exec.Command(exec.DockerImage(updaterImage), fmt.Sprintf("--tty=%t", term.IsTerminal(os.Stdout.Fd())))
	cmd.Stdin = bytes.NewReader(imageJSON)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return err
	}
	log.Info("update complete")
	return nil
}
// runJob runs a one-off job described by config on the cluster. A TTY is
// requested only when the caller supplied no stdin/stdout override, both
// local streams are terminals, and the job is not detached. Detached jobs
// return immediately after printing the job ID; attached jobs stream
// stdio until exit, optionally exiting this process with the job's status
// (config.Exit) or returning RunExitError on a non-zero status.
func runJob(client *controller.Client, config runConfig) error {
	req := &ct.NewJob{
		Cmd:        config.Args,
		TTY:        config.Stdin == nil && config.Stdout == nil && term.IsTerminal(os.Stdin.Fd()) && term.IsTerminal(os.Stdout.Fd()) && !config.Detached,
		ReleaseID:  config.Release,
		Entrypoint: config.Entrypoint,
		Env:        config.Env,
		ReleaseEnv: config.ReleaseEnv,
		DisableLog: config.DisableLog,
	}
	// Fall back to the process's own streams for anything not overridden.
	if config.Stdin == nil {
		config.Stdin = os.Stdin
	}
	if config.Stdout == nil {
		config.Stdout = os.Stdout
	}
	if config.Stderr == nil {
		config.Stderr = os.Stderr
	}
	if req.TTY {
		if req.Env == nil {
			req.Env = make(map[string]string)
		}
		// Propagate local terminal size and TERM to the job.
		ws, err := term.GetWinsize(os.Stdin.Fd())
		if err != nil {
			return err
		}
		req.Columns = int(ws.Width)
		req.Lines = int(ws.Height)
		req.Env["COLUMNS"] = strconv.Itoa(int(ws.Width))
		req.Env["LINES"] = strconv.Itoa(int(ws.Height))
		req.Env["TERM"] = os.Getenv("TERM")
	}

	if config.Detached {
		job, err := client.RunJobDetached(config.App, req)
		if err != nil {
			return err
		}
		log.Println(job.ID)
		return nil
	}

	rwc, err := client.RunJobAttached(config.App, req)
	if err != nil {
		return err
	}
	defer rwc.Close()
	attachClient := cluster.NewAttachClient(rwc)

	var termState *term.State
	if req.TTY {
		// Raw mode so keystrokes pass through to the remote TTY unmodified.
		termState, err = term.MakeRaw(os.Stdin.Fd())
		if err != nil {
			return err
		}
		// Restore the terminal if we return without calling os.Exit
		defer term.RestoreTerminal(os.Stdin.Fd(), termState)
		// Forward local terminal resizes to the remote TTY.
		go func() {
			ch := make(chan os.Signal, 1)
			signal.Notify(ch, SIGWINCH)
			for range ch {
				ws, err := term.GetWinsize(os.Stdin.Fd())
				if err != nil {
					return
				}
				attachClient.ResizeTTY(ws.Height, ws.Width)
				attachClient.Signal(int(SIGWINCH))
			}
		}()
	}

	// Relay the first SIGINT/SIGTERM to the job; escalate to SIGKILL if it
	// has not exited within 10 seconds.
	go func() {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
		sig := <-ch
		attachClient.Signal(int(sig.(syscall.Signal)))
		time.Sleep(10 * time.Second)
		attachClient.Signal(int(syscall.SIGKILL))
	}()

	// Pump stdin to the job, half-closing the write side at EOF.
	go func() {
		io.Copy(attachClient, config.Stdin)
		attachClient.CloseWrite()
	}()

	// Block process shutdown until the job's output has been fully received.
	childDone := make(chan struct{})
	shutdown.BeforeExit(func() {
		<-childDone
	})
	exitStatus, err := attachClient.Receive(config.Stdout, config.Stderr)
	close(childDone)
	if err != nil {
		return err
	}
	if req.TTY {
		term.RestoreTerminal(os.Stdin.Fd(), termState)
	}

	if config.Exit {
		shutdown.ExitWithCode(exitStatus)
	}
	if exitStatus != 0 {
		return RunExitError(exitStatus)
	}
	return nil
}
// runUpdate updates a Flynn cluster from a TUF repository: it refreshes
// the local TUF data, pulls the latest images on every host in parallel,
// then runs the flynn/updater image fed with the pulled image URIs on
// stdin to finish the update.
func runUpdate(args *docopt.Args) error {
	log := log15.New()

	// create and update a TUF client
	log.Info("initializing TUF client")
	local, err := tuf.FileLocalStore(args.String["--tuf-db"])
	if err != nil {
		log.Error("error creating local TUF client", "err", err)
		return err
	}
	remote, err := tuf.HTTPRemoteStore(args.String["--repository"], tufHTTPOpts("updater"))
	if err != nil {
		log.Error("error creating remote TUF client", "err", err)
		return err
	}
	client := tuf.NewClient(local, remote)
	log.Info("updating TUF data")
	// Already being on the latest snapshot is not an error.
	if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {
		log.Error("error updating TUF client", "err", err)
		return err
	}

	// read the TUF db so we can pass it to hosts
	log.Info("reading TUF database")
	tufDB, err := ioutil.ReadFile(args.String["--tuf-db"])
	if err != nil {
		log.Error("error reading the TUF database", "err", err)
		return err
	}

	log.Info("getting host list")
	clusterClient := cluster.NewClient()
	hosts, err := clusterClient.Hosts()
	if err != nil {
		log.Error("error getting host list", "err", err)
		return err
	}
	if len(hosts) == 0 {
		return errors.New("no hosts found")
	}

	// Pull images on every host concurrently; each goroutine reports its
	// final status on hostErrs.
	log.Info("pulling images on all hosts")
	images := make(map[string]string)
	var imageMtx sync.Mutex
	hostErrs := make(chan error)
	for _, h := range hosts {
		go func(host *cluster.Host) {
			log := log.New("host", host.ID())
			log.Info("connecting to host")
			log.Info("pulling images")
			ch := make(chan *layer.PullInfo)
			stream, err := host.PullImages(
				args.String["--repository"],
				args.String["--driver"],
				args.String["--root"],
				bytes.NewReader(tufDB),
				ch,
			)
			if err != nil {
				log.Error("error pulling images", "err", err)
				hostErrs <- err
				return
			}
			defer stream.Close()
			for info := range ch {
				// Individual layers are progress noise; only record images.
				if info.Type == layer.TypeLayer {
					continue
				}
				log.Info("pulled image", "name", info.Repo)
				imageURI := fmt.Sprintf("%s?name=%s&id=%s", args.String["--repository"], info.Repo, info.ID)
				imageMtx.Lock()
				images[info.Repo] = imageURI
				imageMtx.Unlock()
			}
			hostErrs <- stream.Err()
		}(h)
	}
	// Collect one result per host; keep going so every goroutine is drained
	// and report the last error seen.
	// NOTE(review): results arrive on a shared channel in completion order,
	// so the "host" field logged below may not be the host that actually
	// produced that result — confirm whether per-host attribution matters.
	var hostErr error
	for _, h := range hosts {
		if err := <-hostErrs; err != nil {
			log.Error("error pulling images", "host", h.ID(), "err", err)
			hostErr = err
			continue
		}
		log.Info("images pulled successfully", "host", h.ID())
	}
	if hostErr != nil {
		return hostErr
	}
	updaterImage, ok := images["flynn/updater"]
	if !ok {
		e := "missing flynn/updater image"
		log.Error(e)
		return errors.New(e)
	}
	imageJSON, err := json.Marshal(images)
	if err != nil {
		log.Error("error encoding images", "err", err)
		return err
	}

	// use a flag to determine whether to use a TTY log formatter because actually
	// assigning a TTY to the job causes reading images via stdin to fail.
	cmd := exec.Command(exec.DockerImage(updaterImage), fmt.Sprintf("--tty=%t", term.IsTerminal(os.Stdout.Fd())))
	cmd.Stdin = bytes.NewReader(imageJSON)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return err
	}
	log.Info("update complete")
	return nil
}
func runClusterBackup(args *docopt.Args) error { client, err := getClusterClient() if err != nil { return err } var bar *pb.ProgressBar if term.IsTerminal(os.Stderr.Fd()) { bar = pb.New(0) bar.SetUnits(pb.U_BYTES) bar.ShowBar = false bar.ShowSpeed = true bar.Output = os.Stderr bar.Start() } var dest io.Writer = os.Stdout if filename := args.String["--file"]; filename != "" { f, err := os.Create(filename) if err != nil { return err } defer f.Close() dest = f } fmt.Fprintln(os.Stderr, "Creating cluster backup...") tw := NewTarWriter("flynn-backup-"+time.Now().UTC().Format("2006-01-02_150405"), dest) defer tw.Close() // get app and release details for key apps data := make(map[string]*ct.ExpandedFormation, 4) for _, name := range []string{"postgres", "discoverd", "flannel", "controller"} { app, err := client.GetApp(name) if err != nil { return fmt.Errorf("error getting %s app details: %s", name, err) } release, err := client.GetAppRelease(app.ID) if err != nil { return fmt.Errorf("error getting %s app release: %s", name, err) } formation, err := client.GetFormation(app.ID, release.ID) if err != nil { return fmt.Errorf("error getting %s app formation: %s", name, err) } artifact, err := client.GetArtifact(release.ArtifactID) if err != nil { return fmt.Errorf("error getting %s app artifact: %s", name, err) } data[name] = &ct.ExpandedFormation{ App: app, Release: release, Artifact: artifact, Processes: formation.Processes, } } if err := tw.WriteJSON("flynn.json", data); err != nil { return err } config := &runConfig{ App: "postgres", Release: data["postgres"].Release.ID, Entrypoint: []string{"sh"}, Args: []string{"-c", "pg_dumpall --clean --if-exists | gzip -9"}, Env: map[string]string{ "PGHOST": "leader.postgres.discoverd", "PGUSER": "******", "PGPASSWORD": data["postgres"].Release.Env["PGPASSWORD"], }, DisableLog: true, } if err := tw.WriteCommandOutput(client, "postgres.sql.gz", config, bar); err != nil { return fmt.Errorf("error dumping database: %s", err) } if bar != 
nil { bar.Finish() } fmt.Fprintln(os.Stderr, "Backup complete.") return nil }
func runExport(args *docopt.Args, client controller.Client) error { var dest io.Writer = os.Stdout if filename := args.String["--file"]; filename != "" { f, err := os.Create(filename) if err != nil { return fmt.Errorf("error creating export file: %s", err) } defer f.Close() dest = f } app, err := client.GetApp(mustApp()) if err != nil { return fmt.Errorf("error getting app: %s", err) } var bar backup.ProgressBar if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) { b := pb.New(0) b.SetUnits(pb.U_BYTES) b.ShowBar = false b.ShowSpeed = true b.Output = os.Stderr b.Start() defer b.Finish() bar = b } tw := backup.NewTarWriter(app.Name, dest, bar) defer tw.Close() if err := tw.WriteJSON("app.json", app); err != nil { return fmt.Errorf("error exporting app: %s", err) } routes, err := client.RouteList(mustApp()) if err != nil { return fmt.Errorf("error getting routes: %s", err) } if err := tw.WriteJSON("routes.json", routes); err != nil { return fmt.Errorf("error exporting routes: %s", err) } release, err := client.GetAppRelease(mustApp()) if err == controller.ErrNotFound { // if the app has no release then there is nothing more to export return nil } else if err != nil { return fmt.Errorf("error retrieving app: %s", err) } else if err == nil { // Do not allow the exporting of passwords. 
delete(release.Env, "REDIS_PASSWORD") if err := tw.WriteJSON("release.json", release); err != nil { return fmt.Errorf("error exporting release: %s", err) } } if artifactID := release.ImageArtifactID(); artifactID != "" { artifact, err := client.GetArtifact(artifactID) if err != nil && err != controller.ErrNotFound { return fmt.Errorf("error retrieving artifact: %s", err) } else if err == nil { if err := tw.WriteJSON("artifact.json", artifact); err != nil { return fmt.Errorf("error exporting artifact: %s", err) } } } formation, err := client.GetFormation(mustApp(), release.ID) if err != nil && err != controller.ErrNotFound { return fmt.Errorf("error retrieving formation: %s", err) } else if err == nil { if err := tw.WriteJSON("formation.json", formation); err != nil { return fmt.Errorf("error exporting formation: %s", err) } } // expect releases deployed via git to have a slug as their first file // artifact, and legacy releases to have SLUG_URL set var slugURL string if release.IsGitDeploy() && len(release.FileArtifactIDs()) > 0 { slugArtifact, err := client.GetArtifact(release.FileArtifactIDs()[0]) if err != nil && err != controller.ErrNotFound { return fmt.Errorf("error retrieving slug artifact: %s", err) } else if err == nil { slugURL = slugArtifact.URI } } else if u, ok := release.Env["SLUG_URL"]; ok { slugURL = u } if slugURL != "" { reqR, reqW := io.Pipe() config := runConfig{ App: mustApp(), Release: release.ID, DisableLog: true, Entrypoint: []string{"curl"}, Args: []string{"--include", "--raw", slugURL}, Stdout: reqW, Stderr: ioutil.Discard, } if bar != nil { config.Stdout = io.MultiWriter(config.Stdout, bar) } go func() { if err := runJob(client, config); err != nil { shutdown.Fatalf("error retrieving slug: %s", err) } }() res, err := http.ReadResponse(bufio.NewReader(reqR), nil) if err != nil { return fmt.Errorf("error reading slug response: %s", err) } if res.StatusCode != 200 { return fmt.Errorf("unexpected status getting slug: %d", res.StatusCode) } 
length, err := strconv.Atoi(res.Header.Get("Content-Length")) if err != nil { return fmt.Errorf("slug has missing or malformed Content-Length") } if err := tw.WriteHeader("slug.tar.gz", length); err != nil { return fmt.Errorf("error writing slug header: %s", err) } if _, err := io.Copy(tw, res.Body); err != nil { return fmt.Errorf("error writing slug: %s", err) } res.Body.Close() } if pgConfig, err := getAppPgRunConfig(client); err == nil { configPgDump(pgConfig) if err := tw.WriteCommandOutput(client, "postgres.dump", pgConfig.App, &ct.NewJob{ ReleaseID: pgConfig.Release, Entrypoint: pgConfig.Entrypoint, Cmd: pgConfig.Args, Env: pgConfig.Env, DisableLog: pgConfig.DisableLog, }); err != nil { return fmt.Errorf("error creating postgres dump: %s", err) } } if mysqlConfig, err := getAppMysqlRunConfig(client); err == nil { configMysqlDump(mysqlConfig) if err := tw.WriteCommandOutput(client, "mysql.dump", mysqlConfig.App, &ct.NewJob{ ReleaseID: mysqlConfig.Release, Entrypoint: mysqlConfig.Entrypoint, Cmd: mysqlConfig.Args, Env: mysqlConfig.Env, DisableLog: mysqlConfig.DisableLog, }); err != nil { return fmt.Errorf("error creating mysql dump: %s", err) } } return nil }