func (u *Updater) update() error { up := update.New() if err := up.CanUpdate(); err != nil { return err } if err := os.MkdirAll(updateDir, 0755); err != nil { return err } local, err := tuf.FileLocalStore(filepath.Join(updateDir, "tuf.db")) if err != nil { return err } plat := fmt.Sprintf("%s-%s", runtime.GOOS, runtime.GOARCH) opts := &tuf.HTTPRemoteOptions{ UserAgent: fmt.Sprintf("flynn-cli/%s %s", version.String(), plat), Retries: tuf.DefaultHTTPRetries, } remote, err := tuf.HTTPRemoteStore(u.repo, opts) if err != nil { return err } client := tuf.NewClient(local, remote) if err := u.updateTUFClient(client); err != nil { return err } name := fmt.Sprintf("/flynn-%s.gz", plat) latestVersion, err := tufutil.GetVersion(client, name) if err != nil { return err } if latestVersion == version.String() { return nil } bin := &tufBuffer{} if err := client.Download(name, bin); err != nil { return err } gr, err := gzip.NewReader(bin) if err != nil { return err } err, errRecover := up.FromStream(gr) if errRecover != nil { return fmt.Errorf("update and recovery errors: %q %q", err, errRecover) } if err != nil { return err } log.Printf("Updated %s -> %s.", version.String(), latestVersion) return nil }
func (h *jobAPI) PullBinariesAndConfig(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { log := h.host.log.New("fn", "PullBinariesAndConfig") log.Info("extracting TUF database") tufDB, err := extractTufDB(r) if err != nil { log.Error("error extracting TUF database", "err", err) httphelper.Error(w, err) return } defer os.Remove(tufDB) query := r.URL.Query() log.Info("creating local TUF store") local, err := tuf.FileLocalStore(tufDB) if err != nil { log.Error("error creating local TUF store", "err", err) httphelper.Error(w, err) return } opts := &tuf.HTTPRemoteOptions{ UserAgent: fmt.Sprintf("flynn-host/%s %s-%s pull", version.String(), runtime.GOOS, runtime.GOARCH), Retries: tuf.DefaultHTTPRetries, } log.Info("creating remote TUF store") remote, err := tuf.HTTPRemoteStore(query.Get("repository"), opts) if err != nil { log.Error("error creating remote TUF store", "err", err) httphelper.Error(w, err) return } client := tuf.NewClient(local, remote) d := downloader.New(client, query.Get("version")) log.Info("downloading binaries") paths, err := d.DownloadBinaries(query.Get("bin-dir")) if err != nil { log.Error("error downloading binaries", "err", err) httphelper.Error(w, err) return } log.Info("downloading config") configs, err := d.DownloadConfig(query.Get("config-dir")) if err != nil { log.Error("error downloading config", "err", err) httphelper.Error(w, err) return } for k, v := range configs { paths[k] = v } httphelper.JSON(w, 200, paths) }
func newTufClient(tufDB, repository string) (*tuf.Client, error) { local, err := tuf.FileLocalStore(tufDB) if err != nil { return nil, err } opts := &tuf.HTTPRemoteOptions{ UserAgent: fmt.Sprintf("flynn-host/%s %s-%s pull", version.String(), runtime.GOOS, runtime.GOARCH), Retries: tufutil.DefaultHTTPRetries, } remote, err := tuf.HTTPRemoteStore(repository, opts) if err != nil { return nil, err } return tuf.NewClient(local, remote), nil }
func PullImages(tufDB, repository, driver, root, ver string, progress chan<- layer.PullInfo) error { local, err := tuf.FileLocalStore(tufDB) if err != nil { return err } opts := &tuf.HTTPRemoteOptions{ UserAgent: fmt.Sprintf("pinkerton/%s %s-%s pull", version.String(), runtime.GOOS, runtime.GOARCH), Retries: tuf.DefaultHTTPRetries, } remote, err := tuf.HTTPRemoteStore(repository, opts) if err != nil { return err } return PullImagesWithClient(tuf.NewClient(local, remote), repository, driver, root, ver, progress) }
func tufClient(args *docopt.Args) (*tuf.Client, error) { store, ok := args.String["--store"] if !ok { store = args.String["-s"] } local, err := tuf.FileLocalStore(store) if err != nil { return nil, err } remote, err := tuf.HTTPRemoteStore(args.String["<url>"], nil) if err != nil { return nil, err } return tuf.NewClient(local, remote), nil }
func newTUFClient(uri, tufDB string) (*tuf.Client, error) { u, err := url.Parse(uri) if err != nil { return nil, err } baseURL := &url.URL{Scheme: u.Scheme, Host: u.Host, Path: u.Path} remote, err := tuf.HTTPRemoteStore(baseURL.String(), nil) if err != nil { return nil, err } local, err := tuf.FileLocalStore(tufDB) if err != nil { return nil, err } client := tuf.NewClient(local, remote) if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) { return nil, err } return client, nil }
// runDownload downloads the flynn-host binaries, images and config for the
// requested version, storing image data in ZFS-backed volumes.
//
// The version comes from the FLYNN_VERSION environment variable, falling back
// to the channel version published in the TUF repository. If the requested
// version differs from the running binary, the freshly downloaded flynn-host
// binary is exec'd with the same arguments so the matching binary performs
// the image download (the image format changed in v20161106).
func runDownload(args *docopt.Args) error {
	log := log15.New()

	log.Info("initializing ZFS volumes")
	volPath := args.String["--volpath"]
	volDB := filepath.Join(volPath, "volumes.bolt")
	volMan := volumemanager.New(volDB, log, func() (volume.Provider, error) {
		return zfs.NewProvider(&zfs.ProviderConfig{
			DatasetName: zfs.DefaultDatasetName,
			Make:        zfs.DefaultMakeDev(volPath, log),
			WorkingDir:  filepath.Join(volPath, "zfs"),
		})
	})
	// The volume database is exclusive; failure here usually means another
	// flynn-host process holds it.
	if err := volMan.OpenDB(); err != nil {
		log.Error("error opening volume database, make sure flynn-host is not running", "err", err)
		return err
	}

	// create a TUF client and update it
	log.Info("initializing TUF client")
	tufDB := args.String["--tuf-db"]
	local, err := tuf.FileLocalStore(tufDB)
	if err != nil {
		log.Error("error creating local TUF client", "err", err)
		return err
	}
	remote, err := tuf.HTTPRemoteStore(args.String["--repository"], tufHTTPOpts("downloader"))
	if err != nil {
		log.Error("error creating remote TUF client", "err", err)
		return err
	}
	client := tuf.NewClient(local, remote)
	if err := updateTUFClient(client); err != nil {
		log.Error("error updating TUF client", "err", err)
		return err
	}

	configDir := args.String["--config-dir"]

	// FLYNN_VERSION overrides the channel version from the TUF repository.
	requestedVersion := os.Getenv("FLYNN_VERSION")
	if requestedVersion == "" {
		requestedVersion, err = getChannelVersion(configDir, client, log)
		if err != nil {
			return err
		}
	}

	log.Info(fmt.Sprintf("downloading components with version %s", requestedVersion))

	d := downloader.New(client, volMan, requestedVersion)

	binDir := args.String["--bin-dir"]
	log.Info(fmt.Sprintf("downloading binaries to %s", binDir))
	if _, err := d.DownloadBinaries(binDir); err != nil {
		log.Error("error downloading binaries", "err", err)
		return err
	}

	// use the requested version of flynn-host to download the images as
	// the format changed in v20161106
	if version.String() != requestedVersion {
		log.Info(fmt.Sprintf("executing %s flynn-host binary", requestedVersion))
		binPath := filepath.Join(binDir, "flynn-host")
		argv := append([]string{binPath}, os.Args[1:]...)
		// Exec replaces the current process; it only returns on error.
		return syscall.Exec(binPath, argv, os.Environ())
	}

	log.Info("downloading images")
	// Log pull progress for each image and layer as DownloadImages reports it.
	ch := make(chan *ct.ImagePullInfo)
	go func() {
		for info := range ch {
			switch info.Type {
			case ct.ImagePullTypeImage:
				log.Info(fmt.Sprintf("pulling %s image", info.Name))
			case ct.ImagePullTypeLayer:
				log.Info(fmt.Sprintf("pulling %s layer %s (%s)", info.Name, info.Layer.ID, units.BytesSize(float64(info.Layer.Length))))
			}
		}
	}()
	if err := d.DownloadImages(configDir, ch); err != nil {
		log.Error("error downloading images", "err", err)
		return err
	}

	log.Info(fmt.Sprintf("downloading config to %s", configDir))
	if _, err := d.DownloadConfig(configDir); err != nil {
		log.Error("error downloading config", "err", err)
		return err
	}

	log.Info("download complete")
	return nil
}
func runDownload(args *docopt.Args) error { if err := os.MkdirAll(args.String["--root"], 0755); err != nil { return fmt.Errorf("error creating root dir: %s", err) } log := log15.New() // create a TUF client and update it log.Info("initializing TUF client") tufDB := args.String["--tuf-db"] local, err := tuf.FileLocalStore(tufDB) if err != nil { log.Error("error creating local TUF client", "err", err) return err } remote, err := tuf.HTTPRemoteStore(args.String["--repository"], tufHTTPOpts("downloader")) if err != nil { log.Error("error creating remote TUF client", "err", err) return err } client := tuf.NewClient(local, remote) if err := updateTUFClient(client); err != nil { log.Error("error updating TUF client", "err", err) return err } configDir := args.String["--config-dir"] version := os.Getenv("FLYNN_VERSION") if version == "" { version, err = getChannelVersion(configDir, client, log) if err != nil { return err } } log.Info(fmt.Sprintf("downloading components with version %s", version)) log.Info("downloading images") if err := pinkerton.PullImagesWithClient( client, args.String["--repository"], args.String["--driver"], args.String["--root"], version, pinkerton.InfoPrinter(false), ); err != nil { return err } d := downloader.New(client, version) log.Info(fmt.Sprintf("downloading config to %s", configDir)) if _, err := d.DownloadConfig(configDir); err != nil { log.Error("error downloading config", "err", err) return err } binDir := args.String["--bin-dir"] log.Info(fmt.Sprintf("downloading binaries to %s", binDir)) if _, err := d.DownloadBinaries(binDir); err != nil { log.Error("error downloading binaries", "err", err) return err } log.Info("download complete") return nil }
// runUpdate updates every host in the cluster to the latest published
// release: it pulls the new images, binaries and config onto each host in
// parallel, restarts the flynn-host daemons with the new binaries, and then
// runs the updater job to update the running system apps.
//
// Unless --is-latest is set, it first delegates to updateAndExecLatest so the
// newest updater binary performs the actual update.
func runUpdate(args *docopt.Args) error {
	log := log15.New()

	// create and update a TUF client
	log.Info("initializing TUF client")
	local, err := tuf.FileLocalStore(args.String["--tuf-db"])
	if err != nil {
		log.Error("error creating local TUF client", "err", err)
		return err
	}
	remote, err := tuf.HTTPRemoteStore(args.String["--repository"], tufHTTPOpts("updater"))
	if err != nil {
		log.Error("error creating remote TUF client", "err", err)
		return err
	}
	client := tuf.NewClient(local, remote)

	// Re-exec via the latest updater binary unless we already are it.
	if !args.Bool["--is-latest"] {
		return updateAndExecLatest(args.String["--config-dir"], client, log)
	}

	// unlink the current binary if it is a temp file
	if args.Bool["--is-tempfile"] {
		os.Remove(os.Args[0])
	}

	// read the TUF db so we can pass it to hosts
	log.Info("reading TUF database")
	tufDB, err := ioutil.ReadFile(args.String["--tuf-db"])
	if err != nil {
		log.Error("error reading the TUF database", "err", err)
		return err
	}

	log.Info("getting host list")
	clusterClient := cluster.NewClient()
	hosts, err := clusterClient.Hosts()
	if err != nil {
		log.Error("error getting host list", "err", err)
		return err
	}
	if len(hosts) == 0 {
		return errors.New("no hosts found")
	}

	log.Info(fmt.Sprintf("updating %d hosts", len(hosts)))

	// eachHost invokes the given function in a goroutine for each host,
	// returning an error if any of the functions returns an error.
	eachHost := func(f func(*cluster.Host, log15.Logger) error) (err error) {
		errs := make(chan error)
		for _, h := range hosts {
			go func(host *cluster.Host) {
				log := log.New("host", host.ID())
				errs <- f(host, log)
			}(h)
		}
		// Drain one result per host; the last non-nil error wins.
		for range hosts {
			if e := <-errs; e != nil {
				err = e
			}
		}
		return
	}

	log.Info("checking host version compatibility")
	if err := eachHost(func(host *cluster.Host, log log15.Logger) error {
		status, err := host.GetStatus()
		if err != nil {
			log.Error("error getting host status", "err", err)
			return err
		}
		// Hosts older than minVersion cannot be updated (dev builds are
		// exempt from the check).
		v := version.Parse(status.Version)
		if v.Before(version.Parse(minVersion)) && !v.Dev {
			log.Error(ErrIncompatibleVersion.Error(), "version", status.Version)
			return ErrIncompatibleVersion
		}
		return nil
	}); err != nil {
		return err
	}

	// mtx guards the maps written from the per-host goroutines below.
	var mtx sync.Mutex
	images := make(map[string]string)
	log.Info("pulling latest images on all hosts")
	if err := eachHost(func(host *cluster.Host, log log15.Logger) error {
		log.Info("pulling images")
		ch := make(chan *layer.PullInfo)
		stream, err := host.PullImages(
			args.String["--repository"],
			args.String["--driver"],
			args.String["--root"],
			version.String(),
			bytes.NewReader(tufDB),
			ch,
		)
		if err != nil {
			log.Error("error pulling images", "err", err)
			return err
		}
		defer stream.Close()
		for info := range ch {
			// Only record whole images; skip per-layer progress events.
			if info.Type == layer.TypeLayer {
				continue
			}
			log.Info("pulled image", "name", info.Repo)
			imageURI := fmt.Sprintf("%s?name=%s&id=%s", args.String["--repository"], info.Repo, info.ID)
			mtx.Lock()
			images[info.Repo] = imageURI
			mtx.Unlock()
		}
		if err := stream.Err(); err != nil {
			log.Error("error pulling images", "err", err)
			return err
		}
		return nil
	}); err != nil {
		return err
	}

	var binaries map[string]string
	log.Info("pulling latest binaries and config on all hosts")
	if err := eachHost(func(host *cluster.Host, log log15.Logger) error {
		log.Info("pulling binaries and config")
		paths, err := host.PullBinariesAndConfig(
			args.String["--repository"],
			args.String["--bin-dir"],
			args.String["--config-dir"],
			version.String(),
			bytes.NewReader(tufDB),
		)
		if err != nil {
			log.Error("error pulling binaries and config", "err", err)
			return err
		}
		mtx.Lock()
		binaries = paths
		mtx.Unlock()
		log.Info("binaries and config pulled successfully")
		return nil
	}); err != nil {
		return err
	}

	log.Info("validating binaries")
	flynnHost, ok := binaries["flynn-host"]
	if !ok {
		return fmt.Errorf("missing flynn-host binary")
	}
	flynnInit, ok := binaries["flynn-init"]
	if !ok {
		return fmt.Errorf("missing flynn-init binary")
	}

	log.Info("updating flynn-host daemon on all hosts")
	if err := eachHost(func(host *cluster.Host, log log15.Logger) error {
		// TODO(lmars): handle daemons using custom flags (e.g. --state=/foo)
		_, err := host.Update(
			flynnHost,
			"daemon",
			"--id", host.ID(),
			"--flynn-init", flynnInit,
		)
		if err != nil {
			log.Error("error updating binaries", "err", err)
			return err
		}
		log.Info("flynn-host updated successfully")
		return nil
	}); err != nil {
		return err
	}

	updaterImage, ok := images["flynn/updater"]
	if !ok {
		e := "missing flynn/updater image"
		log.Error(e)
		return errors.New(e)
	}
	imageJSON, err := json.Marshal(images)
	if err != nil {
		log.Error("error encoding images", "err", err)
		return err
	}

	// use a flag to determine whether to use a TTY log formatter because actually
	// assigning a TTY to the job causes reading images via stdin to fail.
	cmd := exec.Command(exec.DockerImage(updaterImage), "/bin/updater", fmt.Sprintf("--tty=%t", term.IsTerminal(os.Stdout.Fd())))
	cmd.Stdin = bytes.NewReader(imageJSON)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return err
	}
	log.Info("update complete")
	return nil
}