// updateTUFClient updates the given client, initializing and re-running the
// update if ErrNoRootKeys is returned.
func updateTUFClient(client *tuf.Client) error {
	_, err := client.Update()
	if err == nil || tuf.IsLatestSnapshot(err) {
		return nil
	}
	if err == tuf.ErrNoRootKeys {
		if err := client.Init(rootKeys, len(rootKeys)); err != nil {
			return err
		}
		return updateTUFClient(client)
	}
	return err
}
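// The rootKeys value passed to client.Init above is defined elsewhere in this
// package. A minimal sketch of one way it could be provided, assuming the keys
// are embedded as JSON and that the go-tuf data package is imported (both are
// assumptions, not shown in this file; the key value is a placeholder):
var exampleRootKeysJSON = []byte(`[{"keytype":"ed25519","keyval":{"public":"..."}}]`)

func exampleLoadRootKeys() ([]*data.Key, error) {
	// unmarshal the embedded JSON into go-tuf key structs; Init also takes a
	// signature threshold, which above is every key (len(rootKeys))
	var keys []*data.Key
	if err := json.Unmarshal(exampleRootKeysJSON, &keys); err != nil {
		return nil, err
	}
	return keys, nil
}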
// updateAndExecLatest updates the tuf DB, downloads the latest flynn-host
// binary to a temp file and execs it.
//
// Latest snapshot errors are ignored because, even though we may have the
// latest snapshot, the cluster may not be fully up to date (a previous update
// may have failed).
func updateAndExecLatest(configDir string, client *tuf.Client, log log15.Logger) error {
	log.Info("updating TUF data")
	if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {
		log.Error("error updating TUF client", "err", err)
		return err
	}

	version, err := getChannelVersion(configDir, client, log)
	if err != nil {
		return err
	}

	log.Info(fmt.Sprintf("downloading %s flynn-host binary", version))
	gzTmp, err := tufutil.Download(client, path.Join(version, "flynn-host.gz"))
	if err != nil {
		log.Error("error downloading latest flynn-host binary", "err", err)
		return err
	}
	defer gzTmp.Close()
	gz, err := gzip.NewReader(gzTmp)
	if err != nil {
		log.Error("error creating gzip reader", "err", err)
		return err
	}
	defer gz.Close()
	tmp, err := ioutil.TempFile("", "flynn-host")
	if err != nil {
		log.Error("error creating temp file", "err", err)
		return err
	}
	_, err = io.Copy(tmp, gz)
	tmp.Close()
	if err != nil {
		log.Error("error decompressing gzipped flynn-host binary", "err", err)
		return err
	}
	if err := os.Chmod(tmp.Name(), 0755); err != nil {
		log.Error("error setting executable bit on tmp file", "err", err)
		return err
	}

	log.Info("executing latest flynn-host binary")
	argv := []string{tmp.Name()}
	argv = append(argv, os.Args[1:]...)
	argv = append(argv, "--is-latest")
	argv = append(argv, "--is-tempfile")
	return syscall.Exec(tmp.Name(), argv, os.Environ())
}
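// updateAndExecLatest appends --is-latest and --is-tempfile to argv before
// exec'ing, so the new binary can tell how it was started. A hypothetical
// sketch of the receiving side (the flag names come from above; the handling
// itself is assumed, not shown in this file):
func exampleHandleReexecFlags(isLatest, isTempfile bool, log log15.Logger) {
	if isLatest {
		// --is-latest: this binary was just downloaded, so it should skip
		// updateAndExecLatest and avoid an update/exec loop
		log.Info("running latest flynn-host binary")
	}
	if isTempfile {
		// --is-tempfile: the binary is running from the temp file created
		// above, so it can unlink itself now that it has been exec'd
		if err := os.Remove(os.Args[0]); err != nil {
			log.Error("error removing temp binary", "err", err)
		}
	}
}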
func newTUFClient(uri, tufDB string) (*tuf.Client, error) {
	u, err := url.Parse(uri)
	if err != nil {
		return nil, err
	}
	baseURL := &url.URL{Scheme: u.Scheme, Host: u.Host, Path: u.Path}
	remote, err := tuf.HTTPRemoteStore(baseURL.String(), nil)
	if err != nil {
		return nil, err
	}
	local, err := tuf.FileLocalStore(tufDB)
	if err != nil {
		return nil, err
	}
	client := tuf.NewClient(local, remote)
	if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {
		return nil, err
	}
	return client, nil
}
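// A short usage sketch of newTUFClient (the URL and DB path are placeholders,
// not values from this file). Note that only the scheme, host and path of the
// URI are kept as the remote store's base URL, so query parameters (such as
// the ?name=...&id=... added to image URIs in runUpdate below) are dropped:
func exampleNewTUFClient() (*tuf.Client, error) {
	return newTUFClient("https://example.com/tuf?name=flynn/host&id=abc", "/var/lib/flynn/tuf.db")
}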
func runUpdate(args *docopt.Args) error {
	log := log15.New()

	// create and update a TUF client
	log.Info("initializing TUF client")
	local, err := tuf.FileLocalStore(args.String["--tuf-db"])
	if err != nil {
		log.Error("error creating local TUF client", "err", err)
		return err
	}
	remote, err := tuf.HTTPRemoteStore(args.String["--repository"], tufHTTPOpts("updater"))
	if err != nil {
		log.Error("error creating remote TUF client", "err", err)
		return err
	}
	client := tuf.NewClient(local, remote)
	log.Info("updating TUF data")
	if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {
		log.Error("error updating TUF client", "err", err)
		return err
	}

	// read the TUF db so we can pass it to hosts
	log.Info("reading TUF database")
	tufDB, err := ioutil.ReadFile(args.String["--tuf-db"])
	if err != nil {
		log.Error("error reading the TUF database", "err", err)
		return err
	}

	log.Info("getting host list")
	clusterClient := cluster.NewClient()
	hosts, err := clusterClient.Hosts()
	if err != nil {
		log.Error("error getting host list", "err", err)
		return err
	}
	if len(hosts) == 0 {
		return errors.New("no hosts found")
	}

	log.Info("pulling images on all hosts")
	images := make(map[string]string)
	var imageMtx sync.Mutex
	hostErrs := make(chan error)
	for _, h := range hosts {
		go func(host *cluster.Host) {
			log := log.New("host", host.ID())

			log.Info("connecting to host")

			log.Info("pulling images")
			ch := make(chan *layer.PullInfo)
			stream, err := host.PullImages(
				args.String["--repository"],
				args.String["--driver"],
				args.String["--root"],
				bytes.NewReader(tufDB),
				ch,
			)
			if err != nil {
				log.Error("error pulling images", "err", err)
				hostErrs <- err
				return
			}
			defer stream.Close()
			for info := range ch {
				if info.Type == layer.TypeLayer {
					continue
				}
				log.Info("pulled image", "name", info.Repo)
				imageURI := fmt.Sprintf("%s?name=%s&id=%s", args.String["--repository"], info.Repo, info.ID)
				imageMtx.Lock()
				images[info.Repo] = imageURI
				imageMtx.Unlock()
			}
			hostErrs <- stream.Err()
		}(h)
	}
	var hostErr error
	for _, h := range hosts {
		if err := <-hostErrs; err != nil {
			log.Error("error pulling images", "host", h.ID(), "err", err)
			hostErr = err
			continue
		}
		log.Info("images pulled successfully", "host", h.ID())
	}
	if hostErr != nil {
		return hostErr
	}

	updaterImage, ok := images["flynn/updater"]
	if !ok {
		e := "missing flynn/updater image"
		log.Error(e)
		return errors.New(e)
	}
	imageJSON, err := json.Marshal(images)
	if err != nil {
		log.Error("error encoding images", "err", err)
		return err
	}

	// use a flag to determine whether to use a TTY log formatter because actually
	// assigning a TTY to the job causes reading images via stdin to fail.
	cmd := exec.Command(exec.DockerImage(updaterImage), fmt.Sprintf("--tty=%t", term.IsTerminal(os.Stdout.Fd())))
	cmd.Stdin = bytes.NewReader(imageJSON)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return err
	}
	log.Info("update complete")
	return nil
}
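// runUpdate writes the pulled image map to the updater job's stdin as a JSON
// object mapping repo names to artifact URIs, e.g.
// {"flynn/updater":"<repository>?name=flynn/updater&id=<id>"}. A hypothetical
// sketch of how the flynn/updater process might decode that payload (the
// function and its use of stdin are assumed, not shown in this file):
func exampleDecodeImages(stdin io.Reader) (map[string]string, error) {
	var images map[string]string
	if err := json.NewDecoder(stdin).Decode(&images); err != nil {
		return nil, err
	}
	return images, nil
}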