Example 1
func (u *Updater) update() error {
	up := update.New()
	if err := up.CanUpdate(); err != nil {
		return err
	}

	if err := os.MkdirAll(updateDir, 0755); err != nil {
		return err
	}
	local, err := tuf.FileLocalStore(filepath.Join(updateDir, "tuf.db"))
	if err != nil {
		return err
	}
	plat := fmt.Sprintf("%s-%s", runtime.GOOS, runtime.GOARCH)
	opts := &tuf.HTTPRemoteOptions{
		UserAgent: fmt.Sprintf("flynn-cli/%s %s", version.String(), plat),
		Retries:   tuf.DefaultHTTPRetries,
	}
	remote, err := tuf.HTTPRemoteStore(u.repo, opts)
	if err != nil {
		return err
	}
	client := tuf.NewClient(local, remote)
	if err := u.updateTUFClient(client); err != nil {
		return err
	}

	name := fmt.Sprintf("/flynn-%s.gz", plat)

	latestVersion, err := tufutil.GetVersion(client, name)
	if err != nil {
		return err
	}
	if latestVersion == version.String() {
		return nil
	}

	bin := &tufBuffer{}
	if err := client.Download(name, bin); err != nil {
		return err
	}
	gr, err := gzip.NewReader(bin)
	if err != nil {
		return err
	}
	defer gr.Close()

	err, errRecover := up.FromStream(gr)
	if errRecover != nil {
		return fmt.Errorf("update and recovery errors: %q %q", err, errRecover)
	}
	if err != nil {
		return err
	}
	log.Printf("Updated %s -> %s.", version.String(), latestVersion)
	return nil
}
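The updateTUFClient helper is referenced but not shown here. A minimal sketch, assuming the flynn/go-tuf client API in which Update reports ErrLatestSnapshot when the metadata is already current:

func updateTUFClient(client *tuf.Client) error {
	// Refresh the TUF metadata, treating "already up to date" as success.
	if _, err := client.Update(); err != nil && !tuf.IsLatestSnapshot(err) {
		return err
	}
	return nil
}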
Example 2
func NewToken() (string, error) {
	uri := "https://discovery.flynn.io/clusters"
	if base := os.Getenv("DISCOVERY_SERVER"); base != "" {
		uri = base + "/clusters"
	}

	req, err := http.NewRequest("POST", uri, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("User-Agent", fmt.Sprintf("flynn-host/%s %s-%s", version.String(), runtime.GOOS, runtime.GOARCH))
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusCreated {
		return "", urlError("POST", uri, res.StatusCode)
	}

	base, err := url.Parse(uri)
	if err != nil {
		return "", err
	}
	cluster, err := url.Parse(res.Header.Get("Location"))
	if err != nil {
		return "", err
	}

	return base.ResolveReference(cluster).String(), nil
}
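The urlError helper is not shown; a plausible sketch that turns an unexpected HTTP status into a descriptive error (the real helper may wrap a typed error instead):

func urlError(method, uri string, status int) error {
	return fmt.Errorf("%s %s: unexpected HTTP status %d", method, uri, status)
}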
Example 3
func main() {
	defer shutdown.Exit()

	usage := `
usage: flynn-blobstore <command> [<args>...]

Commands:
        help        show usage for a specific command
        cleanup     delete file blobs from default backend 
        migrate     move file blobs from default backend to a different backend
        server      run blobstore HTTP server

See 'flynn-blobstore help <command>' for more information on a specific command.
`[1:]
	args, _ := docopt.Parse(usage, nil, true, version.String(), true)

	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn-blobstore help`
			fmt.Println(usage)
			return
		} else { // `flynn-blobstore help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if err := runCommand(cmd, cmdArgs); err != nil {
		log.Println(err)
		shutdown.ExitWithCode(1)
	}
}
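The runCommand dispatcher is not part of this listing. A minimal sketch of the usual docopt sub-command pattern, with a hypothetical registry (runCleanup, runMigrate and runServer are assumed handler names):

var blobstoreCommands = map[string]func(args []string) error{
	"cleanup": runCleanup,
	"migrate": runMigrate,
	"server":  runServer,
}

func runCommand(name string, args []string) error {
	f, ok := blobstoreCommands[name]
	if !ok {
		return fmt.Errorf("%q is not a valid command", name)
	}
	return f(args)
}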
Example 4
func (h *jobAPI) PullBinariesAndConfig(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	log := h.host.log.New("fn", "PullBinariesAndConfig")

	log.Info("extracting TUF database")
	tufDB, err := extractTufDB(r)
	if err != nil {
		log.Error("error extracting TUF database", "err", err)
		httphelper.Error(w, err)
		return
	}
	defer os.Remove(tufDB)

	query := r.URL.Query()

	log.Info("creating local TUF store")
	local, err := tuf.FileLocalStore(tufDB)
	if err != nil {
		log.Error("error creating local TUF store", "err", err)
		httphelper.Error(w, err)
		return
	}
	opts := &tuf.HTTPRemoteOptions{
		UserAgent: fmt.Sprintf("flynn-host/%s %s-%s pull", version.String(), runtime.GOOS, runtime.GOARCH),
		Retries:   tuf.DefaultHTTPRetries,
	}
	log.Info("creating remote TUF store")
	remote, err := tuf.HTTPRemoteStore(query.Get("repository"), opts)
	if err != nil {
		log.Error("error creating remote TUF store", "err", err)
		httphelper.Error(w, err)
		return
	}
	client := tuf.NewClient(local, remote)
	d := downloader.New(client, query.Get("version"))

	log.Info("downloading binaries")
	paths, err := d.DownloadBinaries(query.Get("bin-dir"))
	if err != nil {
		log.Error("error downloading binaries", "err", err)
		httphelper.Error(w, err)
		return
	}

	log.Info("downloading config")
	configs, err := d.DownloadConfig(query.Get("config-dir"))
	if err != nil {
		log.Error("error downloading config", "err", err)
		httphelper.Error(w, err)
		return
	}
	for k, v := range configs {
		paths[k] = v
	}

	httphelper.JSON(w, 200, paths)
}
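extractTufDB is referenced but not shown; judging by its use above (a file path that the caller removes afterwards), a plausible sketch writes the request body to a temporary file:

func extractTufDB(r *http.Request) (string, error) {
	f, err := ioutil.TempFile("", "tuf-db")
	if err != nil {
		return "", err
	}
	defer f.Close()
	if _, err := io.Copy(f, r.Body); err != nil {
		os.Remove(f.Name())
		return "", err
	}
	return f.Name(), nil
}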
Example 5
// main is a modified version of the registry main function:
// https://github.com/docker/distribution/blob/6ba799b/cmd/registry/main.go
func main() {
	logrus.SetLevel(logrus.InfoLevel)

	ctx := context.Background()
	ctx = context.WithValue(ctx, "version", version.String())
	ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version"))

	client, err := controller.NewClient("", os.Getenv("CONTROLLER_KEY"))
	if err != nil {
		context.GetLogger(ctx).Fatalln(err)
	}

	release, err := client.GetRelease(os.Getenv("FLYNN_RELEASE_ID"))
	if err != nil {
		context.GetLogger(ctx).Fatalln(err)
	}
	artifact, err := client.GetArtifact(release.ArtifactIDs[0])
	if err != nil {
		context.GetLogger(ctx).Fatalln(err)
	}

	authKey := os.Getenv("AUTH_KEY")

	middleware.Register("flynn", repositoryMiddleware(client, artifact, authKey))

	config := configuration.Configuration{
		Version: configuration.CurrentVersion,
		Storage: configuration.Storage{
			blobstore.DriverName: configuration.Parameters{},
			"delete":             configuration.Parameters{"enabled": true},
		},
		Middleware: map[string][]configuration.Middleware{
			"repository": {
				{Name: "flynn"},
			},
		},
		Auth: configuration.Auth{
			"flynn": configuration.Parameters{
				"auth_key": authKey,
			},
		},
	}
	config.HTTP.Secret = os.Getenv("REGISTRY_HTTP_SECRET")

	status.AddHandler(status.HealthyHandler)

	app := handlers.NewApp(ctx, config)
	http.Handle("/", app)

	addr := ":" + os.Getenv("PORT")
	context.GetLogger(app).Infof("listening on %s", addr)
	if err := http.ListenAndServe(addr, nil); err != nil {
		context.GetLogger(app).Fatalln(err)
	}
}
Example 6
func main() {
	// glog will log to tmp files by default. override so all entries
	// can flow into journald (if running under systemd)
	flag.Set("logtostderr", "true")

	// now parse command line args
	flag.Parse()

	if opts.help {
		fmt.Fprintf(os.Stderr, "Usage: %s [OPTION]...\n", os.Args[0])
		flag.PrintDefaults()
		os.Exit(0)
	}

	if opts.version {
		fmt.Fprintln(os.Stderr, version.String())
		os.Exit(0)
	}

	flagsFromEnv("FLANNELD", flag.CommandLine)

	be, sm, err := newBackend()
	if err != nil {
		log.Info(err)
		os.Exit(1)
	}

	// Register for SIGINT and SIGTERM and wait for one of them to arrive
	log.Info("Installing signal handlers")
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt, syscall.SIGTERM)

	exit := make(chan int)
	go run(be, sm, exit)

	for {
		select {
		case <-sigs:
			// unregister to get default OS nuke behaviour in case we don't exit cleanly
			signal.Stop(sigs)

			log.Info("Exiting...")
			be.Stop()

		case code := <-exit:
			log.Infof("%s mode exited", be.Name())
			os.Exit(code)
		}
	}
}
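flagsFromEnv mirrors a common flannel helper: any flag left unset on the command line can be supplied via an environment variable such as FLANNELD_ETCD_ENDPOINTS. A sketch under that assumption:

func flagsFromEnv(prefix string, fs *flag.FlagSet) {
	set := make(map[string]bool)
	fs.Visit(func(f *flag.Flag) { set[f.Name] = true })
	fs.VisitAll(func(f *flag.Flag) {
		if set[f.Name] {
			return // explicit command-line flags win
		}
		env := prefix + "_" + strings.ToUpper(strings.Replace(f.Name, "-", "_", -1))
		if v := os.Getenv(env); v != "" {
			fs.Set(f.Name, v)
		}
	})
}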
Example 7
func PullImages(tufDB, repository, driver, root, ver string, progress chan<- layer.PullInfo) error {
	local, err := tuf.FileLocalStore(tufDB)
	if err != nil {
		return err
	}
	opts := &tuf.HTTPRemoteOptions{
		UserAgent: fmt.Sprintf("pinkerton/%s %s-%s pull", version.String(), runtime.GOOS, runtime.GOARCH),
	}
	remote, err := tuf.HTTPRemoteStore(repository, opts)
	if err != nil {
		return err
	}
	return PullImagesWithClient(tuf.NewClient(local, remote), repository, driver, root, ver, progress)
}
Example 8
func newTufClient(tufDB, repository string) (*tuf.Client, error) {
	local, err := tuf.FileLocalStore(tufDB)
	if err != nil {
		return nil, err
	}
	opts := &tuf.HTTPRemoteOptions{
		UserAgent: fmt.Sprintf("flynn-host/%s %s-%s pull", version.String(), runtime.GOOS, runtime.GOARCH),
		Retries:   tufutil.DefaultHTTPRetries,
	}
	remote, err := tuf.HTTPRemoteStore(repository, opts)
	if err != nil {
		return nil, err
	}
	return tuf.NewClient(local, remote), nil
}
Example 9
func interpolateManifest(imageDir, imageRepository string, src io.Reader, dest io.Writer) error {
	manifest, err := ioutil.ReadAll(src)
	if err != nil {
		return err
	}
	var replaceErr interface{}
	func() {
		defer func() {
			replaceErr = recover()
		}()
		manifest = imageArtifactPattern.ReplaceAllFunc(manifest, func(raw []byte) []byte {
			name := string(raw[16 : len(raw)-1])

			manifest, err := ioutil.ReadFile(filepath.Join(imageDir, name+".json"))
			if err != nil {
				panic(err)
			}

			artifact := &ct.Artifact{
				Type:        ct.ArtifactTypeFlynn,
				RawManifest: manifest,
				Size:        int64(len(manifest)),
				Meta: map[string]string{
					"flynn.component":    name,
					"flynn.system-image": "true",
				},
			}
			artifact.URI = fmt.Sprintf("%s?target=/%s/images/%s.json", imageRepository, version.String(), artifact.Manifest().ID())
			artifact.Hashes = artifact.Manifest().Hashes()
			if version.Dev() {
				artifact.LayerURLTemplate = "file:///var/lib/flynn/layer-cache/{id}.squashfs"
			} else {
				artifact.LayerURLTemplate = fmt.Sprintf("%s?target=/%s/layers/{id}.squashfs", imageRepository, version.String())
			}
			data, err := json.Marshal(artifact)
			if err != nil {
				panic(err)
			}
			return data
		})
	}()
	if replaceErr != nil {
		return replaceErr.(error)
	}
	_, err = dest.Write(manifest)
	return err
}
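imageArtifactPattern is defined elsewhere; the raw[16:len(raw)-1] slice above strips a 16-byte prefix and a closing bracket, which suggests placeholders of the form $image_artifact[name]. A hypothetical definition consistent with that:

// Matches placeholders like $image_artifact[controller]; "$image_artifact["
// is exactly 16 bytes, agreeing with the slice bounds used above.
var imageArtifactPattern = regexp.MustCompile(`\$image_artifact\[[^\]]+\]`)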
Example 10
func RegisterInstance(info Info) (string, error) {
	data := struct {
		Data Instance `json:"data"`
	}{Instance{
		Name:          info.Name,
		URL:           info.InstanceURL,
		SSHPublicKeys: make([]SSHPublicKey, 0, 4),
		FlynnVersion:  version.String(),
	}}

	for _, t := range []string{"dsa", "rsa", "ecdsa", "ed25519"} {
		keyData, err := ioutil.ReadFile(fmt.Sprintf("/etc/ssh/ssh_host_%s_key.pub", t))
		if err != nil {
			// TODO(titanous): log this?
			continue
		}
		k, _, _, _, err := ssh.ParseAuthorizedKey(keyData)
		if err != nil {
			// TODO(titanous): log this?
			continue
		}
		data.Data.SSHPublicKeys = append(data.Data.SSHPublicKeys, SSHPublicKey{Type: t, Data: k.Marshal()})
	}

	jsonData, err := json.Marshal(&data)
	if err != nil {
		return "", err
	}
	// TODO(titanous): retry
	uri := info.ClusterURL + "/instances"
	res, err := http.Post(uri, "application/json", bytes.NewReader(jsonData))
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusCreated && res.StatusCode != http.StatusConflict {
		return "", urlError("POST", uri, res.StatusCode)
	}
	if err := json.NewDecoder(res.Body).Decode(&data); err != nil {
		return "", err
	}
	return data.Data.ID, nil
}
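A sketch tying this to NewToken from example 2: the returned token URL becomes the ClusterURL, and the host registers itself against it (the instance address and name here are hypothetical):

func register() (string, error) {
	token, err := discovery.NewToken()
	if err != nil {
		return "", err
	}
	return discovery.RegisterInstance(discovery.Info{
		ClusterURL:  token,
		InstanceURL: "http://10.0.0.1:1113", // hypothetical host address
		Name:        "host0",                // hypothetical host ID
	})
}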
Example 11
File: main.go Project: devick/flynn
func main() {
	usage := `
Usage: taffy <app> <repo> <branch> <rev> [-e <var>=<val>]... [-m <key>=<val>]...

Options:
	-e,--env <var>=<val>
	-m,--meta <key>=<val>
`[1:]
	args, _ := docopt.Parse(usage, nil, true, version.String(), false)

	app := args.String["<app>"]
	repo := args.String["<repo>"]
	branch := args.String["<branch>"]
	rev := args.String["<rev>"]

	meta := map[string]string{
		"git":       "true",
		"clone_url": repo,
		"branch":    branch,
		"rev":       rev,
		"taffy_job": os.Getenv("FLYNN_JOB_ID"),
	}

	env, err := parsePairs(args, "--env")
	if err != nil {
		log.Fatal(err)
	}
	m, err := parsePairs(args, "--meta")
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range m {
		meta[k] = v
	}

	if err := cloneRepo(repo, branch); err != nil {
		log.Fatal(err)
	}
	if err := runReceiver(app, rev, env, meta); err != nil {
		log.Fatal(err)
	}
}
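parsePairs is not shown; a plausible sketch, assuming docopt collects the repeated -e/-m options into a slice of KEY=VAL strings:

func parsePairs(args *docopt.Args, arg string) (map[string]string, error) {
	pairs := args.All[arg].([]string)
	out := make(map[string]string, len(pairs))
	for _, s := range pairs {
		kv := strings.SplitN(s, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("invalid %s format: %q", arg, s)
		}
		out[kv[0]] = kv[1]
	}
	return out, nil
}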
Example 12
func (f Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "application/json")

	s := f()
	s.Version = version.String()
	if s.Status == CodeHealthy {
		w.WriteHeader(200)
	} else {
		if s.Status == "" {
			s.Status = CodeUnhealthy
		}
		w.WriteHeader(500)
	}

	res, _ := json.MarshalIndent(struct {
		Data Status `json:"data"`
	}{s}, "", "  ")
	w.Write(res)
	w.Write([]byte("\n"))
}
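A minimal usage sketch for the handler above, assuming Handler is a func() Status type and CodeHealthy comes from the same package; it serves a JSON health check over HTTP:

func main() {
	// Handler satisfies http.Handler via the ServeHTTP method above.
	http.Handle("/status", Handler(func() Status {
		return Status{Status: CodeHealthy}
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}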
Example 13
func main() {
	client, err := controller.NewClient("", os.Getenv("CONTROLLER_KEY"))
	if err != nil {
		log.Fatalln("Unable to connect to controller:", err)
	}

	usage := `
Usage: flynn-receiver <app> <rev> [-e <var>=<val>]... [-m <key>=<val>]...

Options:
	-e,--env <var>=<val>
	-m,--meta <key>=<val>
`[1:]
	args, _ := docopt.Parse(usage, nil, true, version.String(), false)

	appName := args.String["<app>"]
	env, err := parsePairs(args, "--env")
	if err != nil {
		log.Fatal(err)
	}
	meta, err := parsePairs(args, "--meta")
	if err != nil {
		log.Fatal(err)
	}

	app, err := client.GetApp(appName)
	if err == controller.ErrNotFound {
		log.Fatalf("Unknown app %q", appName)
	} else if err != nil {
		log.Fatalln("Error retrieving app:", err)
	}
	prevRelease, err := client.GetAppRelease(app.Name)
	if err == controller.ErrNotFound {
		prevRelease = &ct.Release{}
	} else if err != nil {
		log.Fatalln("Error getting current app release:", err)
	}

	fmt.Printf("-----> Building %s...\n", app.Name)

	var output bytes.Buffer
	slugURL := fmt.Sprintf("%s/%s.tgz", blobstoreURL, random.UUID())
	cmd := exec.Command(exec.DockerImage(os.Getenv("SLUGBUILDER_IMAGE_URI")), slugURL)
	cmd.Stdout = io.MultiWriter(os.Stdout, &output)
	cmd.Stderr = os.Stderr
	cmd.Meta = map[string]string{
		"flynn-controller.app":      app.ID,
		"flynn-controller.app_name": app.Name,
		"flynn-controller.release":  prevRelease.ID,
	}
	if len(prevRelease.Env) > 0 {
		stdin, err := cmd.StdinPipe()
		if err != nil {
			log.Fatalln(err)
		}
		go appendEnvDir(os.Stdin, stdin, prevRelease.Env)
	} else {
		cmd.Stdin = os.Stdin
	}
	cmd.Env = make(map[string]string)
	cmd.Env["BUILD_CACHE_URL"] = fmt.Sprintf("%s/%s-cache.tgz", blobstoreURL, app.ID)
	if buildpackURL, ok := env["BUILDPACK_URL"]; ok {
		cmd.Env["BUILDPACK_URL"] = buildpackURL
	} else if buildpackURL, ok := prevRelease.Env["BUILDPACK_URL"]; ok {
		cmd.Env["BUILDPACK_URL"] = buildpackURL
	}
	for _, k := range []string{"SSH_CLIENT_KEY", "SSH_CLIENT_HOSTS"} {
		if v := os.Getenv(k); v != "" {
			cmd.Env[k] = v
		}
	}

	if err := cmd.Run(); err != nil {
		log.Fatalln("Build failed:", err)
	}

	var types []string
	if match := typesPattern.FindSubmatch(output.Bytes()); match != nil {
		types = strings.Split(string(match[1]), ", ")
	}

	fmt.Printf("-----> Creating release...\n")

	artifact := &ct.Artifact{Type: "docker", URI: os.Getenv("SLUGRUNNER_IMAGE_URI")}
	if err := client.CreateArtifact(artifact); err != nil {
		log.Fatalln("Error creating artifact:", err)
	}

	release := &ct.Release{
		ArtifactID: artifact.ID,
		Env:        prevRelease.Env,
		Meta:       prevRelease.Meta,
	}
	if release.Meta == nil {
		release.Meta = make(map[string]string, len(meta))
	}
	if release.Env == nil {
		release.Env = make(map[string]string, len(env))
	}
	for k, v := range env {
		release.Env[k] = v
	}
	for k, v := range meta {
		release.Meta[k] = v
	}
	procs := make(map[string]ct.ProcessType)
	for _, t := range types {
		proc := prevRelease.Processes[t]
		proc.Cmd = []string{"start", t}
		if t == "web" || strings.HasSuffix(t, "-web") {
			proc.Service = app.Name + "-" + t
			proc.Ports = []ct.Port{{
				Port:  8080,
				Proto: "tcp",
				Service: &host.Service{
					Name:   proc.Service,
					Create: true,
					Check:  &host.HealthCheck{Type: "tcp"},
				},
			}}
		}
		procs[t] = proc
	}
	release.Processes = procs
	release.Env["SLUG_URL"] = slugURL

	if err := client.CreateRelease(release); err != nil {
		log.Fatalln("Error creating release:", err)
	}
	if err := client.DeployAppRelease(app.Name, release.ID); err != nil {
		log.Fatalln("Error deploying app release:", err)
	}

	fmt.Println("=====> Application deployed")

	if needsDefaultScale(app.ID, prevRelease.ID, procs, client) {
		formation := &ct.Formation{
			AppID:     app.ID,
			ReleaseID: release.ID,
			Processes: map[string]int{"web": 1},
		}

		watcher, err := client.WatchJobEvents(app.ID, release.ID)
		if err != nil {
			log.Fatalln("Error streaming job events", err)
			return
		}
		defer watcher.Close()

		if err := client.PutFormation(formation); err != nil {
			log.Fatalln("Error putting formation:", err)
		}
		fmt.Println("=====> Waiting for web job to start...")

		err = watcher.WaitFor(ct.JobEvents{"web": ct.JobUpEvents(1)}, scaleTimeout, func(e *ct.Job) error {
			switch e.State {
			case ct.JobStateUp:
				fmt.Println("=====> Default web formation scaled to 1")
			case ct.JobStateDown:
				return fmt.Errorf("Failed to scale web process type")
			}
			return nil
		})
		if err != nil {
			log.Fatalln(err.Error())
		}
	}
}
Example 14
func main() {
	defer shutdown.Exit()

	usage := `usage: flynn-host [-h|--help] [--version] <command> [<args>...]

Options:
  -h, --help                 Show this message
  --version                  Show current version

Commands:
  help                       Show usage for a specific command
  init                       Create cluster configuration for daemon
  daemon                     Start the daemon
  update                     Update Flynn components
  download                   Download container images
  bootstrap                  Bootstrap layer 1
  inspect                    Get low-level information about a job
  log                        Get the logs of a job
  ps                         List jobs
  stop                       Stop running jobs
  destroy-volumes            Destroys the local volume database
  collect-debug-info         Collect debug information into an anonymous gist or tarball
  version                    Show current version

See 'flynn-host help <command>' for more information on a specific command.
`

	args, _ := docopt.Parse(usage, nil, true, version.String(), true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn help`
			fmt.Println(usage)
			return
		} else { // `flynn help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if cmd == "daemon" {
		// merge in args and env from config file, if available
		var c *config.Config
		if n := os.Getenv("FLYNN_HOST_CONFIG"); n != "" {
			var err error
			c, err = config.Open(n)
			if err != nil {
				log.Fatalf("error opening config file %s: %s", n, err)
			}
		} else {
			var err error
			c, err = config.Open(configFile)
			if err != nil && !os.IsNotExist(err) {
				log.Fatalf("error opening config file %s: %s", configFile, err)
			}
			if c == nil {
				c = &config.Config{}
			}
		}
		cmdArgs = append(cmdArgs, c.Args...)
		for k, v := range c.Env {
			os.Setenv(k, v)
		}
	}

	if err := cli.Run(cmd, cmdArgs); err != nil {
		if err == cli.ErrInvalidCommand {
			fmt.Printf("ERROR: %q is not a valid command\n\n", cmd)
			fmt.Println(usage)
			shutdown.ExitWithCode(1)
		}
		shutdown.Fatal(err)
	}
}
Example 15
func runDownload(args *docopt.Args) error {
	log := log15.New()

	log.Info("initializing ZFS volumes")
	volPath := args.String["--volpath"]
	volDB := filepath.Join(volPath, "volumes.bolt")
	volMan := volumemanager.New(volDB, log, func() (volume.Provider, error) {
		return zfs.NewProvider(&zfs.ProviderConfig{
			DatasetName: zfs.DefaultDatasetName,
			Make:        zfs.DefaultMakeDev(volPath, log),
			WorkingDir:  filepath.Join(volPath, "zfs"),
		})
	})
	if err := volMan.OpenDB(); err != nil {
		log.Error("error opening volume database, make sure flynn-host is not running", "err", err)
		return err
	}

	// create a TUF client and update it
	log.Info("initializing TUF client")
	tufDB := args.String["--tuf-db"]
	local, err := tuf.FileLocalStore(tufDB)
	if err != nil {
		log.Error("error creating local TUF client", "err", err)
		return err
	}
	remote, err := tuf.HTTPRemoteStore(args.String["--repository"], tufHTTPOpts("downloader"))
	if err != nil {
		log.Error("error creating remote TUF client", "err", err)
		return err
	}
	client := tuf.NewClient(local, remote)
	if err := updateTUFClient(client); err != nil {
		log.Error("error updating TUF client", "err", err)
		return err
	}

	configDir := args.String["--config-dir"]

	requestedVersion := os.Getenv("FLYNN_VERSION")
	if requestedVersion == "" {
		requestedVersion, err = getChannelVersion(configDir, client, log)
		if err != nil {
			return err
		}
	}
	log.Info(fmt.Sprintf("downloading components with version %s", requestedVersion))

	d := downloader.New(client, volMan, requestedVersion)

	binDir := args.String["--bin-dir"]
	log.Info(fmt.Sprintf("downloading binaries to %s", binDir))
	if _, err := d.DownloadBinaries(binDir); err != nil {
		log.Error("error downloading binaries", "err", err)
		return err
	}

	// use the requested version of flynn-host to download the images as
	// the format changed in v20161106
	if version.String() != requestedVersion {
		log.Info(fmt.Sprintf("executing %s flynn-host binary", requestedVersion))
		binPath := filepath.Join(binDir, "flynn-host")
		argv := append([]string{binPath}, os.Args[1:]...)
		return syscall.Exec(binPath, argv, os.Environ())
	}

	log.Info("downloading images")
	ch := make(chan *ct.ImagePullInfo)
	go func() {
		for info := range ch {
			switch info.Type {
			case ct.ImagePullTypeImage:
				log.Info(fmt.Sprintf("pulling %s image", info.Name))
			case ct.ImagePullTypeLayer:
				log.Info(fmt.Sprintf("pulling %s layer %s (%s)",
					info.Name, info.Layer.ID, units.BytesSize(float64(info.Layer.Length))))
			}
		}
	}()
	if err := d.DownloadImages(configDir, ch); err != nil {
		log.Error("error downloading images", "err", err)
		return err
	}

	log.Info(fmt.Sprintf("downloading config to %s", configDir))
	if _, err := d.DownloadConfig(configDir); err != nil {
		log.Error("error downloading config", "err", err)
		return err
	}

	log.Info("download complete")
	return nil
}
Example 16
func (u *Updater) update() error {
	up := update.New()
	if err := up.CanUpdate(); err != nil {
		return err
	}

	if err := os.MkdirAll(updateDir, 0755); err != nil {
		return err
	}
	local, err := tuf.FileLocalStore(filepath.Join(updateDir, "tuf.db"))
	if err != nil {
		return err
	}
	plat := fmt.Sprintf("%s-%s", runtime.GOOS, runtime.GOARCH)
	opts := &tuf.HTTPRemoteOptions{
		UserAgent: fmt.Sprintf("flynn-cli/%s %s", version.String(), plat),
	}
	remote, err := tuf.HTTPRemoteStore(u.repo, opts)
	if err != nil {
		return err
	}
	client := tuf.NewClient(local, remote)
	if err := u.updateTUFClient(client); err != nil {
		return err
	}
	targets, err := client.Targets()
	if err != nil {
		return err
	}

	name := fmt.Sprintf("/flynn-%s.gz", plat)
	target, ok := targets[name]
	if !ok {
		return fmt.Errorf("missing %q in tuf targets", name)
	}
	if target.Custom == nil || len(*target.Custom) == 0 {
		return errors.New("missing custom metadata in tuf target")
	}
	var data struct {
		Version string
	}
	if err := json.Unmarshal(*target.Custom, &data); err != nil {
		return err
	}
	if data.Version == "" {
		return errors.New("missing version in tuf target")
	}
	if data.Version == version.String() {
		return nil
	}

	bin := &tufBuffer{}
	if err := client.Download(name, bin); err != nil {
		return err
	}
	gr, err := gzip.NewReader(bin)
	if err != nil {
		return err
	}
	defer gr.Close()

	err, errRecover := up.FromStream(gr)
	if errRecover != nil {
		return fmt.Errorf("update and recovery errors: %q %q", err, errRecover)
	}
	if err != nil {
		return err
	}
	log.Printf("Updated %s -> %s.", version.String(), data.Version)
	return nil
}
Example 17
func runUpdate(args *docopt.Args) error {
	log := log15.New()

	// create and update a TUF client
	log.Info("initializing TUF client")
	local, err := tuf.FileLocalStore(args.String["--tuf-db"])
	if err != nil {
		log.Error("error creating local TUF client", "err", err)
		return err
	}
	remote, err := tuf.HTTPRemoteStore(args.String["--repository"], tufHTTPOpts("updater"))
	if err != nil {
		log.Error("error creating remote TUF client", "err", err)
		return err
	}
	client := tuf.NewClient(local, remote)

	if !args.Bool["--is-latest"] {
		return updateAndExecLatest(args.String["--config-dir"], client, log)
	}

	// unlink the current binary if it is a temp file
	if args.Bool["--is-tempfile"] {
		os.Remove(os.Args[0])
	}

	// read the TUF db so we can pass it to hosts
	log.Info("reading TUF database")
	tufDB, err := ioutil.ReadFile(args.String["--tuf-db"])
	if err != nil {
		log.Error("error reading the TUF database", "err", err)
		return err
	}

	log.Info("getting host list")
	clusterClient := cluster.NewClient()
	hosts, err := clusterClient.Hosts()
	if err != nil {
		log.Error("error getting host list", "err", err)
		return err
	}
	if len(hosts) == 0 {
		return errors.New("no hosts found")
	}

	log.Info(fmt.Sprintf("updating %d hosts", len(hosts)))

	// eachHost invokes the given function in a goroutine for each host,
	// returning an error if any of the functions returns an error.
	eachHost := func(f func(*cluster.Host, log15.Logger) error) (err error) {
		errs := make(chan error)
		for _, h := range hosts {
			go func(host *cluster.Host) {
				log := log.New("host", host.ID())
				errs <- f(host, log)
			}(h)
		}
		for range hosts {
			if e := <-errs; e != nil {
				err = e
			}
		}
		return
	}

	var mtx sync.Mutex
	images := make(map[string]string)
	log.Info("pulling latest images on all hosts")
	if err := eachHost(func(host *cluster.Host, log log15.Logger) error {
		log.Info("pulling images")
		ch := make(chan *layer.PullInfo)
		stream, err := host.PullImages(
			args.String["--repository"],
			args.String["--driver"],
			args.String["--root"],
			version.String(),
			bytes.NewReader(tufDB),
			ch,
		)
		if err != nil {
			log.Error("error pulling images", "err", err)
			return err
		}
		defer stream.Close()
		for info := range ch {
			if info.Type == layer.TypeLayer {
				continue
			}
			log.Info("pulled image", "name", info.Repo)
			imageURI := fmt.Sprintf("%s?name=%s&id=%s", args.String["--repository"], info.Repo, info.ID)
			mtx.Lock()
			images[info.Repo] = imageURI
			mtx.Unlock()
		}
		if err := stream.Err(); err != nil {
			log.Error("error pulling images", "err", err)
			return err
		}
		return nil
	}); err != nil {
		return err
	}

	var binaries map[string]string
	log.Info("pulling latest binaries and config on all hosts")
	if err := eachHost(func(host *cluster.Host, log log15.Logger) error {
		log.Info("pulling binaries and config")
		paths, err := host.PullBinariesAndConfig(
			args.String["--repository"],
			args.String["--bin-dir"],
			args.String["--config-dir"],
			version.String(),
			bytes.NewReader(tufDB),
		)
		if err != nil {
			log.Error("error pulling binaries and config", "err", err)
			return err
		}
		mtx.Lock()
		binaries = paths
		mtx.Unlock()
		log.Info("binaries and config pulled successfully")
		return nil
	}); err != nil {
		return err
	}

	log.Info("validating binaries")
	flynnHost, ok := binaries["flynn-host"]
	if !ok {
		return errors.New("missing flynn-host binary")
	}
	flynnInit, ok := binaries["flynn-init"]
	if !ok {
		return errors.New("missing flynn-init binary")
	}
	flynnNSUmount, ok := binaries["flynn-nsumount"]
	if !ok {
		return errors.New("missing flynn-nsumount binary")
	}

	log.Info("updating flynn-host daemon on all hosts")
	if err := eachHost(func(host *cluster.Host, log log15.Logger) error {
		// TODO(lmars): handle daemons using custom flags (e.g. --state=/foo)
		_, err := host.Update(
			flynnHost,
			"daemon",
			"--id", host.ID(),
			"--flynn-init", flynnInit,
			"--nsumount", flynnNSUmount,
		)
		if err != nil {
			log.Error("error updating binaries", "err", err)
			return err
		}
		log.Info("flynn-host updated successfully")
		return nil
	}); err != nil {
		return err
	}

	updaterImage, ok := images["flynn/updater"]
	if !ok {
		e := "missing flynn/updater image"
		log.Error(e)
		return errors.New(e)
	}
	imageJSON, err := json.Marshal(images)
	if err != nil {
		log.Error("error encoding images", "err", err)
		return err
	}

	// use a flag to determine whether to use a TTY log formatter because actually
	// assigning a TTY to the job causes reading images via stdin to fail.
	cmd := exec.Command(exec.DockerImage(updaterImage), fmt.Sprintf("--tty=%t", term.IsTerminal(os.Stdout.Fd())))
	cmd.Stdin = bytes.NewReader(imageJSON)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return err
	}
	log.Info("update complete")
	return nil
}
Example 18
func runVersion() {
	fmt.Println(version.String())
}
Example 19
func run() error {
	client, err := controller.NewClient("", os.Getenv("CONTROLLER_KEY"))
	if err != nil {
		return fmt.Errorf("Unable to connect to controller: %s", err)
	}

	usage := `
Usage: flynn-receiver <app> <rev> [-e <var>=<val>]... [-m <key>=<val>]...

Options:
	-e,--env <var>=<val>
	-m,--meta <key>=<val>
`[1:]
	args, _ := docopt.Parse(usage, nil, true, version.String(), false)

	appName := args.String["<app>"]
	env, err := parsePairs(args, "--env")
	if err != nil {
		return err
	}
	meta, err := parsePairs(args, "--meta")
	if err != nil {
		return err
	}

	slugBuilder, err := client.GetArtifact(os.Getenv("SLUGBUILDER_IMAGE_ID"))
	if err != nil {
		return fmt.Errorf("Error getting slugbuilder image: %s", err)
	}

	slugRunnerID := os.Getenv("SLUGRUNNER_IMAGE_ID")
	if _, err := client.GetArtifact(slugRunnerID); err != nil {
		return fmt.Errorf("Error getting slugrunner image: %s", err)
	}

	app, err := client.GetApp(appName)
	if err == controller.ErrNotFound {
		return fmt.Errorf("Unknown app %q", appName)
	} else if err != nil {
		return fmt.Errorf("Error retrieving app: %s", err)
	}
	prevRelease, err := client.GetAppRelease(app.Name)
	if err == controller.ErrNotFound {
		prevRelease = &ct.Release{}
	} else if err != nil {
		return fmt.Errorf("Error getting current app release: %s", err)
	}

	fmt.Printf("-----> Building %s...\n", app.Name)

	slugImageID := random.UUID()
	jobEnv := map[string]string{
		"BUILD_CACHE_URL": fmt.Sprintf("%s/%s-cache.tgz", blobstoreURL, app.ID),
		"CONTROLLER_KEY":  os.Getenv("CONTROLLER_KEY"),
		"SLUG_IMAGE_ID":   slugImageID,
	}
	if buildpackURL, ok := env["BUILDPACK_URL"]; ok {
		jobEnv["BUILDPACK_URL"] = buildpackURL
	} else if buildpackURL, ok := prevRelease.Env["BUILDPACK_URL"]; ok {
		jobEnv["BUILDPACK_URL"] = buildpackURL
	}
	for _, k := range []string{"SSH_CLIENT_KEY", "SSH_CLIENT_HOSTS"} {
		if v := os.Getenv(k); v != "" {
			jobEnv[k] = v
		}
	}

	job := &host.Job{
		Config: host.ContainerConfig{
			Args:       []string{"/builder/build.sh"},
			Env:        jobEnv,
			Stdin:      true,
			DisableLog: true,
		},
		Partition: "background",
		Metadata: map[string]string{
			"flynn-controller.app":      app.ID,
			"flynn-controller.app_name": app.Name,
			"flynn-controller.release":  prevRelease.ID,
			"flynn-controller.type":     "slugbuilder",
		},
		Resources: resource.Defaults(),
	}
	if sb, ok := prevRelease.Processes["slugbuilder"]; ok {
		job.Resources = sb.Resources
	} else if rawLimit := os.Getenv("SLUGBUILDER_DEFAULT_MEMORY_LIMIT"); rawLimit != "" {
		if limit, err := resource.ParseLimit(resource.TypeMemory, rawLimit); err == nil {
			job.Resources[resource.TypeMemory] = resource.Spec{Limit: &limit, Request: &limit}
		}
	}

	cmd := exec.Job(slugBuilder, job)
	cmd.Volumes = []*ct.VolumeReq{{Path: "/tmp", DeleteOnStop: true}}
	var output bytes.Buffer
	cmd.Stdout = io.MultiWriter(os.Stdout, &output)
	cmd.Stderr = os.Stderr

	releaseEnv := make(map[string]string, len(env))
	if prevRelease.Env != nil {
		for k, v := range prevRelease.Env {
			releaseEnv[k] = v
		}
	}
	for k, v := range env {
		releaseEnv[k] = v
	}

	if len(releaseEnv) > 0 {
		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}
		go func() {
			if err := appendEnvDir(os.Stdin, stdin, releaseEnv); err != nil {
				log.Fatalln("ERROR:", err)
			}
		}()
	} else {
		cmd.Stdin = os.Stdin
	}

	shutdown.BeforeExit(func() { cmd.Kill() })
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("Build failed: %s", err)
	}

	var types []string
	if match := typesPattern.FindSubmatch(output.Bytes()); match != nil {
		types = strings.Split(string(match[1]), ", ")
	}

	fmt.Printf("-----> Creating release...\n")

	release := &ct.Release{
		ArtifactIDs: []string{slugRunnerID, slugImageID},
		Env:         releaseEnv,
		Meta:        prevRelease.Meta,
	}
	if release.Meta == nil {
		release.Meta = make(map[string]string, len(meta))
	}
	for k, v := range meta {
		release.Meta[k] = v
	}
	procs := make(map[string]ct.ProcessType)
	for _, t := range types {
		proc := prevRelease.Processes[t]
		proc.Args = []string{"/runner/init", "start", t}
		if (t == "web" || strings.HasSuffix(t, "-web")) && proc.Service == "" {
			proc.Service = app.Name + "-" + t
			proc.Ports = []ct.Port{{
				Port:  8080,
				Proto: "tcp",
				Service: &host.Service{
					Name:   proc.Service,
					Create: true,
					Check:  &host.HealthCheck{Type: "tcp"},
				},
			}}
		}
		procs[t] = proc
	}
	if sb, ok := prevRelease.Processes["slugbuilder"]; ok {
		procs["slugbuilder"] = sb
	}
	release.Processes = procs

	if err := client.CreateRelease(release); err != nil {
		return fmt.Errorf("Error creating release: %s", err)
	}
	if err := client.DeployAppRelease(app.Name, release.ID, nil); err != nil {
		return fmt.Errorf("Error deploying app release: %s", err)
	}

	// if the app has a web job and has not been scaled before, create a
	// web=1 formation and wait for the "APPNAME-web" service to start
	// (whilst also watching job events so the deploy fails if the job
	// crashes)
	if needsDefaultScale(app.ID, prevRelease.ID, procs, client) {
		fmt.Println("=====> Scaling initial release to web=1")

		formation := &ct.Formation{
			AppID:     app.ID,
			ReleaseID: release.ID,
			Processes: map[string]int{"web": 1},
		}

		jobEvents := make(chan *ct.Job)
		jobStream, err := client.StreamJobEvents(app.ID, jobEvents)
		if err != nil {
			return fmt.Errorf("Error streaming job events: %s", err)
		}
		defer jobStream.Close()

		serviceEvents := make(chan *discoverd.Event)
		serviceStream, err := discoverd.NewService(app.Name + "-web").Watch(serviceEvents)
		if err != nil {
			return fmt.Errorf("Error streaming service events: %s", err)
		}
		defer serviceStream.Close()

		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("Error putting formation: %s", err)
		}
		fmt.Println("-----> Waiting for initial web job to start...")

		err = func() error {
			for {
				select {
				case e, ok := <-serviceEvents:
					if !ok {
						return fmt.Errorf("Service stream closed unexpectedly: %s", serviceStream.Err())
					}
					if e.Kind == discoverd.EventKindUp && e.Instance.Meta["FLYNN_RELEASE_ID"] == release.ID {
						fmt.Println("=====> Initial web job started")
						return nil
					}
				case e, ok := <-jobEvents:
					if !ok {
						return fmt.Errorf("Job stream closed unexpectedly: %s", jobStream.Err())
					}
					if e.State == ct.JobStateDown {
						return errors.New("Initial web job failed to start")
					}
				case <-time.After(time.Duration(app.DeployTimeout) * time.Second):
					return errors.New("Timed out waiting for initial web job to start")
				}
			}
		}()
		if err != nil {
			fmt.Println("-----> WARN: scaling initial release down to web=0 due to error")
			formation.Processes["web"] = 0
			if err := client.PutFormation(formation); err != nil {
				// just print this error and return the original error
				fmt.Println("-----> WARN: could not scale the initial release down (it may continue to run):", err)
			}
			return err
		}
	}

	fmt.Println("=====> Application deployed")
	return nil
}
Example 20
func tufHTTPOpts(name string) *tuf.HTTPRemoteOptions {
	return &tuf.HTTPRemoteOptions{
		UserAgent: fmt.Sprintf("flynn-host/%s %s-%s %s", version.String(), runtime.GOOS, runtime.GOARCH, name),
	}
}
Example 21
func (s *Scheduler) SendTelemetry() {
	if !s.IsLeader() || os.Getenv("TELEMETRY_DISABLED") == "true" {
		return
	}

	params := make(url.Values)
	params.Add("id_version", version.String())
	params.Add("id_bootstrap", os.Getenv("TELEMETRY_BOOTSTRAP_ID"))
	params.Add("id_cluster", os.Getenv("TELEMETRY_CLUSTER_ID"))

	params.Add("ct_hosts", strconv.Itoa(len(s.hosts)))

	var jobs int
	for _, j := range s.jobs {
		if j.State == JobStateRunning {
			jobs++
		}
	}
	params.Add("ct_running_jobs", strconv.Itoa(jobs))

	var formations int
	apps := make(map[string]struct{})
	dbs := make(map[string]map[string]struct{})
	providers := []string{"postgres", "mongodb", "mysql", "redis"}
	for _, p := range providers {
		dbs[p] = make(map[string]struct{})
	}
	for _, f := range s.formations {
		if f.App.Meta["flynn-system-app"] == "true" || f.GetProcesses().IsEmpty() {
			continue
		}
		formations++
		apps[f.App.ID] = struct{}{}

		for _, p := range providers {
			switch p {
			case "postgres":
				if _, ok := f.Release.Env["FLYNN_POSTGRES"]; !ok {
					continue
				}
				if db := f.Release.Env["PGDATABASE"]; db != "" {
					dbs[p][db] = struct{}{}
				}
			case "mongodb":
				if _, ok := f.Release.Env["FLYNN_MONGO"]; !ok {
					continue
				}
				if db := f.Release.Env["MONGO_DATABASE"]; db != "" {
					dbs[p][db] = struct{}{}
				}
			case "mysql":
				if _, ok := f.Release.Env["FLYNN_MYSQL"]; !ok {
					continue
				}
				if db := f.Release.Env["MYSQL_DATABASE"]; db != "" {
					dbs[p][db] = struct{}{}
				}
			case "redis":
				if db := f.Release.Env["FLYNN_REDIS"]; db != "" {
					dbs[p][db] = struct{}{}
				}
			}
		}
	}
	params.Add("ct_running_apps", strconv.Itoa(len(apps)))
	params.Add("ct_running_formations", strconv.Itoa(formations))
	for _, p := range providers {
		params.Add(fmt.Sprintf("ct_%s_dbs", p), strconv.Itoa(len(dbs[p])))
	}

	go func() {
		req, _ := http.NewRequest("GET", telemetryURL, nil)
		req.Header.Set("User-Agent", "flynn-scheduler/"+version.String())
		req.URL.RawQuery = params.Encode()

		for i := 0; i < 5; i++ {
			res, err := http.DefaultClient.Do(req)
			if res != nil {
				res.Body.Close()
			}
			if err == nil && res.StatusCode == 200 {
				return
			}
			time.Sleep(10 * time.Second)
		}
	}()
}
Example 22
func main() {
	defer shutdown.Exit()

	log.SetFlags(0)

	usage := `
usage: flynn [-a <app>] [-c <cluster>] <command> [<args>...]

Options:
	-a <app>
	-c <cluster>
	-h, --help

Commands:
	help      show usage for a specific command
	install   install flynn
	cluster   manage clusters
	create    create an app
	delete    delete an app
	apps      list apps
	ps        list jobs
	kill      kill a job
	log       get app log
	scale     change formation
	run       run a job
	env       manage env variables
	meta      manage app metadata
	route     manage routes
	pg        manage postgres database
	provider  manage resource providers
	resource  provision a new resource
	key       manage SSH public keys
	release   add a docker image release
	export    export app data
	import    create app from exported data
	version   show flynn version

See 'flynn help <command>' for more information on a specific command.
`[1:]
	args, _ := docopt.Parse(usage, nil, true, version.String(), true)

	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn help`
			fmt.Println(usage)
			return
		} else if cmdArgs[0] == "--json" {
			cmds := make(map[string]string)
			for name, cmd := range commands {
				cmds[name] = cmd.usage
			}
			out, err := json.MarshalIndent(cmds, "", "\t")
			if err != nil {
				shutdown.Fatal(err)
			}
			fmt.Println(string(out))
			return
		} else { // `flynn help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}
	// Run the update command as early as possible to avoid the possibility of
	// installations being stranded without updates due to errors in other code
	if cmd == "update" {
		if err := runUpdate(); err != nil {
			shutdown.Fatal(err)
		}
		return
	} else {
		defer updater.backgroundRun() // doesn't run if os.Exit is called
	}

	// Set the cluster config name
	if args.String["-c"] != "" {
		flagCluster = args.String["-c"]
	}

	flagApp = args.String["-a"]
	if flagApp != "" {
		if err := readConfig(); err != nil {
			shutdown.Fatal(err)
		}

		if ra, err := appFromGitRemote(flagApp); err == nil {
			clusterConf = ra.Cluster
			flagApp = ra.Name
		}
	}

	if err := runCommand(cmd, cmdArgs); err != nil {
		log.Println(err)
		shutdown.ExitWithCode(1)
		return
	}
}
Example 23
func runDaemon(args *docopt.Args) {
	hostname, _ := os.Hostname()
	httpPort := args.String["--http-port"]
	externalIP := args.String["--external-ip"]
	listenIP := args.String["--listen-ip"]
	stateFile := args.String["--state"]
	hostID := args.String["--id"]
	tags := parseTagArgs(args.String["--tags"])
	force := args.Bool["--force"]
	volPath := args.String["--volpath"]
	volProvider := args.String["--vol-provider"]
	backendName := args.String["--backend"]
	flynnInit := args.String["--flynn-init"]
	logDir := args.String["--log-dir"]
	discoveryToken := args.String["--discovery"]
	bridgeName := args.String["--bridge-name"]

	logger, err := setupLogger(logDir)
	if err != nil {
		shutdown.Fatalf("error setting up logger: %s", err)
	}

	var peerIPs []string
	if args.String["--peer-ips"] != "" {
		peerIPs = strings.Split(args.String["--peer-ips"], ",")
	}

	if hostID == "" {
		hostID = strings.Replace(hostname, "-", "", -1)
	}

	var maxJobConcurrency uint64 = 4
	if m, err := strconv.ParseUint(args.String["--max-job-concurrency"], 10, 64); err == nil {
		maxJobConcurrency = m
	}

	var partitionCGroups = make(map[string]int64) // name -> cpu shares
	for _, p := range strings.Split(args.String["--partitions"], " ") {
		nameShares := strings.Split(p, "=cpu_shares:")
		if len(nameShares) != 2 {
			shutdown.Fatalf("invalid partition specifier: %q", p)
		}
		shares, err := strconv.ParseInt(nameShares[1], 10, 64)
		if err != nil || shares < 2 {
			shutdown.Fatalf("invalid cpu shares specifier: %q", shares)
		}
		partitionCGroups[nameShares[0]] = shares
	}
	for _, s := range []string{"user", "system", "background"} {
		if _, ok := partitionCGroups[s]; !ok {
			shutdown.Fatalf("missing mandatory resource partition: %s", s)
		}
	}

	log := logger.New("fn", "runDaemon", "host.id", hostID)
	log.Info("starting daemon")

	log.Info("validating host ID")
	if strings.Contains(hostID, "-") {
		shutdown.Fatal("host id must not contain dashes")
	}
	if externalIP == "" {
		log.Info("detecting external IP")
		var err error
		externalIP, err = config.DefaultExternalIP()
		if err != nil {
			log.Error("error detecting external IP", "err", err)
			shutdown.Fatal(err)
		}
		log.Info("using external IP " + externalIP)
	}

	publishAddr := net.JoinHostPort(externalIP, httpPort)
	if discoveryToken != "" {
		// TODO: retry
		log.Info("registering with cluster discovery service", "token", discoveryToken, "addr", publishAddr, "name", hostID)
		discoveryID, err := discovery.RegisterInstance(discovery.Info{
			ClusterURL:  discoveryToken,
			InstanceURL: "http://" + publishAddr,
			Name:        hostID,
		})
		if err != nil {
			log.Error("error registering with cluster discovery service", "err", err)
			shutdown.Fatal(err)
		}
		log.Info("registered with cluster discovery service", "id", discoveryID)
	}

	state := NewState(hostID, stateFile)
	shutdown.BeforeExit(func() { state.CloseDB() })

	log.Info("initializing volume manager", "provider", volProvider)
	var newVolProvider func() (volume.Provider, error)
	switch volProvider {
	case "zfs":
		newVolProvider = func() (volume.Provider, error) {
			// use a zpool backing file size of either 70% of the device on which
			// volumes will reside, or 100GB if that can't be determined.
			log.Info("determining ZFS zpool size")
			var size int64
			var dev syscall.Statfs_t
			if err := syscall.Statfs(volPath, &dev); err == nil {
				size = (dev.Bsize * int64(dev.Blocks) * 7) / 10
			} else {
				size = 100000000000
			}
			log.Info(fmt.Sprintf("using ZFS zpool size %d", size))

			return zfsVolume.NewProvider(&zfsVolume.ProviderConfig{
				DatasetName: "flynn-default",
				Make: &zfsVolume.MakeDev{
					BackingFilename: filepath.Join(volPath, "zfs/vdev/flynn-default-zpool.vdev"),
					Size:            size,
				},
				WorkingDir: filepath.Join(volPath, "zfs"),
			})
		}
	case "mock":
		newVolProvider = func() (volume.Provider, error) { return nil, nil }
	default:
		shutdown.Fatalf("unknown volume provider: %q", volProvider)
	}
	vman := volumemanager.New(
		filepath.Join(volPath, "volumes.bolt"),
		newVolProvider,
	)
	shutdown.BeforeExit(func() { vman.CloseDB() })

	mux := logmux.New(hostID, logDir, logger.New("host.id", hostID, "component", "logmux"))

	log.Info("initializing job backend", "type", backendName)
	var backend Backend
	switch backendName {
	case "libcontainer":
		backend, err = NewLibcontainerBackend(state, vman, bridgeName, flynnInit, mux, partitionCGroups, logger.New("host.id", hostID, "component", "backend", "backend", "libcontainer"))
	case "mock":
		backend = MockBackend{}
	default:
		shutdown.Fatalf("unknown backend %q", backendName)
	}
	if err != nil {
		shutdown.Fatal(err)
	}
	backend.SetDefaultEnv("EXTERNAL_IP", externalIP)
	backend.SetDefaultEnv("LISTEN_IP", listenIP)

	var buffers host.LogBuffers
	discoverdManager := NewDiscoverdManager(backend, mux, hostID, publishAddr, tags)
	publishURL := "http://" + publishAddr
	host := &Host{
		id:  hostID,
		url: publishURL,
		status: &host.HostStatus{
			ID:      hostID,
			PID:     os.Getpid(),
			URL:     publishURL,
			Tags:    tags,
			Version: version.String(),
		},
		state:   state,
		backend: backend,
		vman:    vman,
		discMan: discoverdManager,
		log:     logger.New("host.id", hostID),

		maxJobConcurrency: maxJobConcurrency,
	}
	backend.SetHost(host)

	// restore the host status if set in the environment
	if statusEnv := os.Getenv("FLYNN_HOST_STATUS"); statusEnv != "" {
		log.Info("restoring host status from parent")
		if err := json.Unmarshal([]byte(statusEnv), &host.status); err != nil {
			log.Error("error restoring host status from parent", "err", err)
			shutdown.Fatal(err)
		}
		pid := os.Getpid()
		log.Info("setting status PID", "pid", pid)
		host.status.PID = pid
		// keep the same tags as the parent
		discoverdManager.UpdateTags(host.status.Tags)
	}

	log.Info("creating HTTP listener")
	l, err := newHTTPListener(net.JoinHostPort(listenIP, httpPort))
	if err != nil {
		log.Error("error creating HTTP listener", "err", err)
		shutdown.Fatal(err)
	}
	host.listener = l
	shutdown.BeforeExit(func() { host.Close() })

	// if we have a control socket FD, wait for a "resume" message before
	// opening state DBs and serving requests.
	var controlFD int
	if fdEnv := os.Getenv("FLYNN_CONTROL_FD"); fdEnv != "" {
		log.Info("parsing control socket file descriptor")
		controlFD, err = strconv.Atoi(fdEnv)
		if err != nil {
			log.Error("error parsing control socket file descriptor", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("waiting for resume message from parent")
		msg := make([]byte, len(ControlMsgResume))
		if _, err := syscall.Read(controlFD, msg); err != nil {
			log.Error("error waiting for resume message from parent", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("validating resume message")
		if !bytes.Equal(msg, ControlMsgResume) {
			log.Error(fmt.Sprintf("unexpected resume message from parent: %v", msg))
			shutdown.ExitWithCode(1)
		}

		log.Info("receiving log buffers from parent")
		if err := json.NewDecoder(&controlSock{controlFD}).Decode(&buffers); err != nil {
			log.Error("error receiving log buffers from parent", "err", err)
			shutdown.Fatal(err)
		}
	}

	log.Info("opening state databases")
	if err := host.OpenDBs(); err != nil {
		log.Error("error opening state databases", "err", err)
		shutdown.Fatal(err)
	}

	// stopJobs stops all jobs, leaving discoverd until the end so other
	// jobs can unregister themselves on shutdown.
	stopJobs := func() (err error) {
		var except []string
		host.statusMtx.RLock()
		if host.status.Discoverd != nil && host.status.Discoverd.JobID != "" {
			except = []string{host.status.Discoverd.JobID}
		}
		host.statusMtx.RUnlock()
		log.Info("stopping all jobs except discoverd")
		if err := backend.Cleanup(except); err != nil {
			log.Error("error stopping all jobs except discoverd", "err", err)
			return err
		}
		for _, id := range except {
			log.Info("stopping discoverd")
			if e := backend.Stop(id); e != nil {
				log.Error("error stopping discoverd", "err", err)
				err = e
			}
		}
		return
	}

	log.Info("restoring state")
	resurrect, err := state.Restore(backend, buffers)
	if err != nil {
		log.Error("error restoring state", "err", err)
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() {
		// close discoverd before stopping jobs so we can unregister first
		log.Info("unregistering with service discovery")
		if err := discoverdManager.Close(); err != nil {
			log.Error("error unregistering with service discovery", "err", err)
		}
		stopJobs()
	})

	log.Info("serving HTTP requests")
	host.ServeHTTP()

	if controlFD > 0 {
		// now that we are serving requests, send an "ok" message to the parent
		log.Info("sending ok message to parent")
		if _, err := syscall.Write(controlFD, ControlMsgOK); err != nil {
			log.Error("error sending ok message to parent", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("closing control socket")
		if err := syscall.Close(controlFD); err != nil {
			log.Error("error closing control socket", "err", err)
		}
	}

	if force {
		log.Info("forcibly stopping existing jobs")
		if err := stopJobs(); err != nil {
			log.Error("error forcibly stopping existing jobs", "err", err)
			shutdown.Fatal(err)
		}
	}

	if discoveryToken != "" {
		log.Info("getting cluster peer IPs")
		instances, err := discovery.GetCluster(discoveryToken)
		if err != nil {
			// TODO(titanous): retry?
			log.Error("error getting discovery cluster", "err", err)
			shutdown.Fatal(err)
		}
		peerIPs = make([]string, 0, len(instances))
		for _, inst := range instances {
			u, err := url.Parse(inst.URL)
			if err != nil {
				continue
			}
			ip, _, err := net.SplitHostPort(u.Host)
			if err != nil || ip == externalIP {
				continue
			}
			peerIPs = append(peerIPs, ip)
		}
		log.Info("got cluster peer IPs", "peers", peerIPs)
	}
	log.Info("connecting to cluster peers")
	if err := discoverdManager.ConnectPeer(peerIPs); err != nil {
		log.Info("no cluster peers available")
	}

	if !args.Bool["--no-resurrect"] {
		log.Info("resurrecting jobs")
		resurrect()
	}

	monitor := NewMonitor(host.discMan, externalIP, logger)
	shutdown.BeforeExit(func() { monitor.Shutdown() })
	go monitor.Run()

	log.Info("blocking main goroutine")
	<-make(chan struct{})
}
Example 24
func main() {
	// when starting a container with libcontainer, we first exec the
	// current binary with libcontainer-init as the first argument,
	// which triggers the following code to initialise the container
	// environment (namespaces, network etc.) then exec containerinit
	if len(os.Args) > 1 && os.Args[1] == "libcontainer-init" {
		runtime.GOMAXPROCS(1)
		runtime.LockOSThread()
		factory, _ := libcontainer.New("")
		if err := factory.StartInitialization(); err != nil {
			log.Fatal(err)
		}
	}

	defer shutdown.Exit()

	usage := `usage: flynn-host [-h|--help] [--version] <command> [<args>...]

Options:
  -h, --help                 Show this message
  --version                  Show current version

Commands:
  help                       Show usage for a specific command
  init                       Create cluster configuration for daemon
  daemon                     Start the daemon
  update                     Update Flynn components
  download                   Download container images
  bootstrap                  Bootstrap layer 1
  inspect                    Get low-level information about a job
  log                        Get the logs of a job
  ps                         List jobs
  stop                       Stop running jobs
  signal                     Signal a job
  destroy-volumes            Destroys the local volume database
  collect-debug-info         Collect debug information into an anonymous gist or tarball
  list                       Lists ID and IP of each host
  version                    Show current version
  fix                        Fix a broken cluster
  tags                       Manage flynn-host daemon tags
  discover                   Return low-level information about a service

See 'flynn-host help <command>' for more information on a specific command.
`

	args, _ := docopt.Parse(usage, nil, true, version.String(), true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn help`
			fmt.Println(usage)
			return
		} else { // `flynn help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if cmd == "daemon" {
		// merge in args and env from config file, if available
		var c *config.Config
		if n := os.Getenv("FLYNN_HOST_CONFIG"); n != "" {
			var err error
			c, err = config.Open(n)
			if err != nil {
				shutdown.Fatalf("error opening config file %s: %s", n, err)
			}
		} else {
			var err error
			c, err = config.Open(configFile)
			if err != nil && !os.IsNotExist(err) {
				shutdown.Fatalf("error opening config file %s: %s", configFile, err)
			}
			if c == nil {
				c = &config.Config{}
			}
		}
		cmdArgs = append(cmdArgs, c.Args...)
		for k, v := range c.Env {
			os.Setenv(k, v)
		}
	}

	if err := cli.Run(cmd, cmdArgs); err != nil {
		if err == cli.ErrInvalidCommand {
			fmt.Printf("ERROR: %q is not a valid command\n\n", cmd)
			fmt.Println(usage)
			shutdown.ExitWithCode(1)
		} else if _, ok := err.(cli.ErrAlreadyLogged); ok {
			shutdown.ExitWithCode(1)
		}
		shutdown.Fatal(err)
	}
}
Example 25
func main() {
	usage := `
Usage: taffy <app> <repo> <branch> <rev> [-e <var>=<val>]... [-m <key>=<val>]...

Options:
	-e,--env <var>=<val>
	-m,--meta <key>=<val>
`[1:]
	args, _ := docopt.Parse(usage, nil, true, version.String(), false)

	app := args.String["<app>"]
	repo := args.String["<repo>"]
	branch := args.String["<branch>"]
	rev := args.String["<rev>"]

	clientKey := os.Getenv("SSH_CLIENT_KEY")
	clientHosts := os.Getenv("SSH_CLIENT_HOSTS")
	homeFolder := os.Getenv("HOME")

	meta := map[string]string{
		"git":       "true",
		"clone_url": repo,
		"branch":    branch,
		"rev":       rev,
		"taffy_job": os.Getenv("FLYNN_JOB_ID"),
	}

	if homeFolder == "" || homeFolder == "/" {
		homeFolder = "/root"
	}

	if clientKey != "" {
		ensureDir(homeFolder)
		if err := ioutil.WriteFile(filepath.Join(homeFolder, ".ssh", "id_rsa"), []byte(clientKey), 0600); err != nil {
			log.Fatal(err)
		}
	}

	if clientHosts != "" {
		ensureDir(homeFolder)
		if err := ioutil.WriteFile(filepath.Join(homeFolder, ".ssh", "known_hosts"), []byte(clientHosts), 0600); err != nil {
			log.Fatal(err)
		}
	}

	env, err := parsePairs(args, "--env")
	if err != nil {
		log.Fatal(err)
	}
	m, err := parsePairs(args, "--meta")
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range m {
		meta[k] = v
	}

	if err := cloneRepo(repo, branch); err != nil {
		log.Fatal(err)
	}
	if err := runReceiver(app, rev, env, meta); err != nil {
		log.Fatal(err)
	}
}
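ensureDir is not shown; since key files are written into $HOME/.ssh immediately after it runs, a plausible sketch creates that directory with safe permissions:

func ensureDir(home string) {
	if err := os.MkdirAll(filepath.Join(home, ".ssh"), 0700); err != nil {
		log.Fatal(err)
	}
}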