Code Example #1
File: destroy-volumes.go Project: imjorge/flynn
func runVolumeDestroy(args *docopt.Args) error {
	if os.Getuid() != 0 {
		fmt.Println("this command requires root!\ntry again with `sudo flynn-host destroy-volumes`.")
		shutdown.ExitWithCode(1)
	}

	volPath := args.String["--volpath"]
	includeData := args.Bool["--include-data"]
	keepSystemImages := args.Bool["--keep-system-images"]

	volumeDBPath := filepath.Join(volPath, "volumes.bolt")

	// if there is no state db, nothing to do
	if _, err := os.Stat(volumeDBPath); os.IsNotExist(err) {
		fmt.Printf("no volume state db exists at %q; already clean.\n", volumeDBPath)
		shutdown.Exit()
	}

	// open the state db: we may use it, and in any case we want to hold its flock before removing it
	vman, vmanErr := loadVolumeState(volumeDBPath)

	// if '--include-data' specified and vman loaded, destroy volumes
	allVolumesDestroyed := true
	if vmanErr != nil {
		fmt.Printf("%s\n", vmanErr)
	} else if !includeData {
		fmt.Println("'--include-data' not specified; leaving backend data storage intact.")
	} else {
		if err := destroyVolumes(vman, keepSystemImages); err != nil {
			fmt.Printf("%s\n", err)
			allVolumesDestroyed = false
		}
	}

	if !keepSystemImages {
		// remove db file
		if err := os.Remove(volumeDBPath); err != nil {
			fmt.Printf("could not remove volume state db file %q: %s.\n", volumeDBPath, err)
			shutdown.ExitWithCode(5)
		}
		fmt.Printf("state db file %q removed.\n", volumeDBPath)
	}

	// exit code depends on whether all volumes were destroyed successfully
	if includeData && !allVolumesDestroyed {
		shutdown.ExitWithCode(6)
	}
	shutdown.Exit()
	return nil
}
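
A note on the exit pattern: every example on this page funnels process exit through Flynn's shutdown package instead of calling os.Exit directly, so that cleanup hooks registered with BeforeExit still run. The following is a minimal sketch of that pattern, assuming only the functions visible in these examples (Exit, ExitWithCode, BeforeExit) and the import path of the Flynn repository; doWork is illustrative.

package main

import (
	"errors"
	"fmt"

	"github.com/flynn/flynn/pkg/shutdown"
)

func doWork() error { return errors.New("example failure") }

func main() {
	// run exit hooks and exit 0 if main returns normally
	defer shutdown.Exit()

	// hooks registered with BeforeExit run before the process exits,
	// including exits via ExitWithCode (plain defers would be skipped,
	// since the shutdown package ultimately calls os.Exit)
	shutdown.BeforeExit(func() { fmt.Println("releasing resources") })

	if err := doWork(); err != nil {
		fmt.Println(err)
		shutdown.ExitWithCode(1) // runs hooks, then exits with status 1
	}
}
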
Code Example #2
File: blobstore.go Project: imjorge/flynn
func main() {
	defer shutdown.Exit()

	usage := `
usage: flynn-blobstore <command> [<args>...]

Commands:
        help        show usage for a specific command
        cleanup     delete file blobs from default backend 
        migrate     move file blobs from default backend to a different backend
        server      run blobstore HTTP server

See 'flynn-blobstore help <command>' for more information on a specific command.
`[1:]
	args, _ := docopt.Parse(usage, nil, true, version.String(), true)

	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn-blobstore help`
			fmt.Println(usage)
			return
		} else { // `flynn-blobstore help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if err := runCommand(cmd, cmdArgs); err != nil {
		log.Println(err)
		shutdown.ExitWithCode(1)
	}
}
Code Example #3
File: main.go Project: GrimDerp/flynn
func mustApp() string {
	name, err := app()
	if err != nil {
		log.Println(err)
		shutdown.ExitWithCode(1)
	}
	return name
}
Code Example #4
File: process.go Project: n1rvana/flynn
// monitorCmd waits for cmd to finish and reports an error if it was unexpected.
// It also closes the stopped channel to notify other listeners that the command has finished.
func (p *Process) monitorCmd(cmd *exec.Cmd, stopped chan struct{}) {
	err := cmd.Wait()
	if !p.stopping.Load().(bool) {
		p.Logger.Error("unexpectedly exited", "err", err)
		shutdown.ExitWithCode(1)
	}
	close(stopped)
}
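
monitorCmd above distinguishes an expected stop from a crash via a bool kept in an atomic.Value. Here is a standalone sketch of that flag pattern, using hypothetical names (proc, stop, monitor) rather than the real Flynn types:

package main

import (
	"fmt"
	"os/exec"
	"sync/atomic"
)

type proc struct {
	stopping atomic.Value // holds a bool
}

func newProc() *proc {
	p := &proc{}
	p.stopping.Store(false) // initialise so Load().(bool) never sees nil
	return p
}

// stop marks the exit as expected before killing the child.
func (p *proc) stop(cmd *exec.Cmd) error {
	p.stopping.Store(true)
	return cmd.Process.Kill()
}

// monitor mirrors monitorCmd: an exit not requested via stop() is a crash.
func (p *proc) monitor(cmd *exec.Cmd, stopped chan struct{}) {
	err := cmd.Wait()
	if !p.stopping.Load().(bool) {
		fmt.Println("process unexpectedly exited:", err)
	}
	close(stopped)
}

func main() {
	p := newProc()
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		fmt.Println(err)
		return
	}
	stopped := make(chan struct{})
	go p.monitor(cmd, stopped)
	p.stop(cmd)
	<-stopped
}
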
Code Example #5
File: process.go Project: yanghongkjxy/flynn
func (p *Process) start() error {
	logger := p.Logger.New("fn", "start", "id", p.ID, "port", p.Port)
	logger.Info("starting process")

	cmd := NewCmd(exec.Command(filepath.Join(p.SbinDir, "mysqld"), "--defaults-extra-file="+p.ConfigPath()))
	if err := cmd.Start(); err != nil {
		logger.Error("failed to start process", "err", err)
		return err
	}
	p.cmd = cmd
	p.runningValue.Store(true)

	go func() {
		if <-cmd.Stopped(); cmd.Err() != nil {
			logger.Error("process unexpectedly exit", "err", cmd.Err())
			shutdown.ExitWithCode(1)
		}
	}()

	logger.Debug("waiting for process to start")

	timer := time.NewTimer(p.OpTimeout)
	defer timer.Stop()

	for {
		// Connect to server.
		// Retry after sleep if an error occurs.
		if err := func() error {
			db, err := p.connectLocal()
			if err != nil {
				return err
			}
			defer db.Close()

			if _, err := db.Exec("SELECT 1"); err != nil {
				return err
			}

			return nil
		}(); err != nil {
			select {
			case <-timer.C:
				logger.Error("timed out waiting for process to start", "err", err)
				if err := p.stop(); err != nil {
					logger.Error("error stopping process", "err", err)
				}
				return err
			default:
				logger.Debug("ignoring error connecting to mysql", "err", err)
				time.Sleep(checkInterval)
				continue
			}
		}

		return nil
	}
}
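
Both this start() and the postgres variant below share the same readiness loop: connect, run SELECT 1, and retry at a fixed interval until an overall timeout fires. The loop generalises to something like this sketch (waitReady, its parameters, and the toy ping are illustrative, not part of the Flynn code):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitReady polls ping until it succeeds or timeout elapses,
// sleeping interval between failed attempts.
func waitReady(ping func() error, timeout, interval time.Duration) error {
	deadline := time.After(timeout)
	for {
		err := ping()
		if err == nil {
			return nil
		}
		select {
		case <-deadline:
			return fmt.Errorf("timed out waiting for readiness: %v", err)
		default:
			time.Sleep(interval)
		}
	}
}

func main() {
	start := time.Now()
	// toy ping that becomes healthy after 300ms
	err := waitReady(func() error {
		if time.Since(start) < 300*time.Millisecond {
			return errors.New("not ready yet")
		}
		return nil
	}, time.Second, 50*time.Millisecond)
	fmt.Println("ready:", err == nil)
}
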
Code Example #6
File: destroy-volumes.go Project: ably-forks/flynn
func loadVolumeState(volumeDBPath string) (*volumemanager.Manager, error) {
	// attempt to restore manager from state db
	// no need to defer closing the db; we're about to unlink it and the fd can drop on exit
	fmt.Println("opening volume state db...")
	vman := volumemanager.New(
		volumeDBPath,
		func() (volume.Provider, error) {
			return nil, nil
		},
	)
	if err := vman.OpenDB(); err != nil {
		if strings.HasSuffix(err.Error(), "timeout") { // bolt.ErrTimeout
			fmt.Println("volume state db is locked by another process; aborting.")
			shutdown.ExitWithCode(4)
		}
		return nil, fmt.Errorf("warning: the previous volume database could not be loaded; any data in backends may need manual removal\n  (error was: %s)", err)
	}
	fmt.Println("volume state db opened.")
	return vman, nil
}
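
The HasSuffix check above matches on the error text, presumably because OpenDB wraps the underlying bolt error and only its message survives; the inline comment hints that the concrete value is bolt.ErrTimeout. Where the error is available unwrapped, the check can be a direct comparison, as in this sketch (assumes the github.com/boltdb/bolt package):

package main

import (
	"errors"
	"fmt"

	"github.com/boltdb/bolt"
)

// isLockTimeout reports whether err is bolt's flock timeout error,
// assuming err reaches the caller unwrapped.
func isLockTimeout(err error) bool {
	return err == bolt.ErrTimeout
}

func main() {
	fmt.Println(isLockTimeout(bolt.ErrTimeout))       // true
	fmt.Println(isLockTimeout(errors.New("timeout"))) // false: distinct value
}
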
Code Example #7
File: main.go Project: GrimDerp/flynn
func main() {
	defer shutdown.Exit()

	log.SetFlags(0)

	usage := `
usage: flynn [-a <app>] [-c <cluster>] <command> [<args>...]

Options:
	-a <app>
	-c <cluster>
	-h, --help

Commands:
	help      show usage for a specific command
	install   install flynn
	cluster   manage clusters
	create    create an app
	delete    delete an app
	apps      list apps
	ps        list jobs
	kill      kill a job
	log       get app log
	scale     change formation
	run       run a job
	env       manage env variables
	meta      manage app metadata
	route     manage routes
	pg        manage postgres database
	provider  manage resource providers
	resource  provision a new resource
	key       manage SSH public keys
	release   add a docker image release
	export    export app data
	import    create app from exported data
	version   show flynn version

See 'flynn help <command>' for more information on a specific command.
`[1:]
	args, _ := docopt.Parse(usage, nil, true, version.String(), true)

	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn help`
			fmt.Println(usage)
			return
		} else if cmdArgs[0] == "--json" {
			cmds := make(map[string]string)
			for name, cmd := range commands {
				cmds[name] = cmd.usage
			}
			out, err := json.MarshalIndent(cmds, "", "\t")
			if err != nil {
				shutdown.Fatal(err)
			}
			fmt.Println(string(out))
			return
		} else { // `flynn help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}
	// Run the update command as early as possible to avoid the possibility of
	// installations being stranded without updates due to errors in other code
	if cmd == "update" {
		if err := runUpdate(); err != nil {
			shutdown.Fatal(err)
		}
		return
	} else {
		defer updater.backgroundRun() // doesn't run if os.Exit is called
	}

	// Set the cluster config name
	if args.String["-c"] != "" {
		flagCluster = args.String["-c"]
	}

	flagApp = args.String["-a"]
	if flagApp != "" {
		if err := readConfig(); err != nil {
			shutdown.Fatal(err)
		}

		if ra, err := appFromGitRemote(flagApp); err == nil {
			clusterConf = ra.Cluster
			flagApp = ra.Name
		}
	}

	if err := runCommand(cmd, cmdArgs); err != nil {
		log.Println(err)
		shutdown.ExitWithCode(1)
		return
	}
}
Code Example #8
File: host.go Project: justintung/flynn
func main() {
	defer shutdown.Exit()

	usage := `usage: flynn-host [-h|--help] [--version] <command> [<args>...]

Options:
  -h, --help                 Show this message
  --version                  Show current version

Commands:
  help                       Show usage for a specific command
  init                       Create cluster configuration for daemon
  daemon                     Start the daemon
  update                     Update Flynn components
  download                   Download container images
  bootstrap                  Bootstrap layer 1
  inspect                    Get low-level information about a job
  log                        Get the logs of a job
  ps                         List jobs
  stop                       Stop running jobs
  destroy-volumes            Destroys the local volume database
  collect-debug-info         Collect debug information into an anonymous gist or tarball
  version                    Show current version

See 'flynn-host help <command>' for more information on a specific command.
`

	args, _ := docopt.Parse(usage, nil, true, version.String(), true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn-host help`
			fmt.Println(usage)
			return
		} else { // `flynn-host help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if cmd == "daemon" {
		// merge in args and env from config file, if available
		var c *config.Config
		if n := os.Getenv("FLYNN_HOST_CONFIG"); n != "" {
			var err error
			c, err = config.Open(n)
			if err != nil {
				log.Fatalf("error opening config file %s: %s", n, err)
			}
		} else {
			var err error
			c, err = config.Open(configFile)
			if err != nil && !os.IsNotExist(err) {
				log.Fatalf("error opening config file %s: %s", configFile, err)
			}
			if c == nil {
				c = &config.Config{}
			}
		}
		cmdArgs = append(cmdArgs, c.Args...)
		for k, v := range c.Env {
			os.Setenv(k, v)
		}
	}

	if err := cli.Run(cmd, cmdArgs); err != nil {
		if err == cli.ErrInvalidCommand {
			fmt.Printf("ERROR: %q is not a valid command\n\n", cmd)
			fmt.Println(usage)
			shutdown.ExitWithCode(1)
		}
		shutdown.Fatal(err)
	}
}
Code Example #9
File: run.go Project: ably-forks/flynn
func runJob(client controller.Client, config runConfig) error {
	req := &ct.NewJob{
		Args:       config.Args,
		TTY:        config.Stdin == nil && config.Stdout == nil && term.IsTerminal(os.Stdin.Fd()) && term.IsTerminal(os.Stdout.Fd()) && !config.Detached,
		ReleaseID:  config.Release,
		Env:        config.Env,
		ReleaseEnv: config.ReleaseEnv,
		DisableLog: config.DisableLog,
	}

	// ensure slug apps from old clusters use /runner/init
	release, err := client.GetRelease(req.ReleaseID)
	if err != nil {
		return err
	}
	if release.IsGitDeploy() && (len(req.Args) == 0 || req.Args[0] != "/runner/init") {
		req.Args = append([]string{"/runner/init"}, req.Args...)
	}

	// set deprecated Entrypoint and Cmd for old clusters
	if len(req.Args) > 0 {
		req.DeprecatedEntrypoint = []string{req.Args[0]}
	}
	if len(req.Args) > 1 {
		req.DeprecatedCmd = req.Args[1:]
	}

	if config.Stdin == nil {
		config.Stdin = os.Stdin
	}
	if config.Stdout == nil {
		config.Stdout = os.Stdout
	}
	if config.Stderr == nil {
		config.Stderr = os.Stderr
	}
	if req.TTY {
		if req.Env == nil {
			req.Env = make(map[string]string)
		}
		ws, err := term.GetWinsize(os.Stdin.Fd())
		if err != nil {
			return err
		}
		req.Columns = int(ws.Width)
		req.Lines = int(ws.Height)
		req.Env["COLUMNS"] = strconv.Itoa(int(ws.Width))
		req.Env["LINES"] = strconv.Itoa(int(ws.Height))
		req.Env["TERM"] = os.Getenv("TERM")
	}

	if config.Detached {
		job, err := client.RunJobDetached(config.App, req)
		if err != nil {
			return err
		}
		log.Println(job.ID)
		return nil
	}

	rwc, err := client.RunJobAttached(config.App, req)
	if err != nil {
		return err
	}
	defer rwc.Close()
	attachClient := cluster.NewAttachClient(rwc)

	var termState *term.State
	if req.TTY {
		termState, err = term.MakeRaw(os.Stdin.Fd())
		if err != nil {
			return err
		}
		// Restore the terminal if we return without calling os.Exit
		defer term.RestoreTerminal(os.Stdin.Fd(), termState)
		go func() {
			ch := make(chan os.Signal, 1)
			signal.Notify(ch, SIGWINCH)
			for range ch {
				ws, err := term.GetWinsize(os.Stdin.Fd())
				if err != nil {
					return
				}
				attachClient.ResizeTTY(ws.Height, ws.Width)
				attachClient.Signal(int(SIGWINCH))
			}
		}()
	}

	go func() {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
		sig := <-ch
		attachClient.Signal(int(sig.(syscall.Signal)))
		time.Sleep(10 * time.Second)
		attachClient.Signal(int(syscall.SIGKILL))
	}()

	go func() {
		io.Copy(attachClient, config.Stdin)
		attachClient.CloseWrite()
	}()

	childDone := make(chan struct{})
	shutdown.BeforeExit(func() {
		<-childDone
	})
	exitStatus, err := attachClient.Receive(config.Stdout, config.Stderr)
	close(childDone)
	if err != nil {
		return err
	}
	if req.TTY {
		term.RestoreTerminal(os.Stdin.Fd(), termState)
	}
	if config.Exit {
		shutdown.ExitWithCode(exitStatus)
	}
	if exitStatus != 0 {
		return RunExitError(exitStatus)
	}
	return nil
}
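
The signal goroutine in runJob implements a simple escalation policy: forward the first SIGINT or SIGTERM to the remote job, wait ten seconds, then send SIGKILL. Isolated from the attach client, the policy looks like this sketch (forward stands in for attachClient.Signal and is illustrative; Unix-only):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// escalate forwards the first SIGINT/SIGTERM, then SIGKILLs after a grace period.
func escalate(forward func(sig syscall.Signal), grace time.Duration) {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
	sig := <-ch
	forward(sig.(syscall.Signal)) // give the job a chance to exit cleanly
	time.Sleep(grace)
	forward(syscall.SIGKILL) // still running; kill it
}

func main() {
	done := make(chan struct{})
	go func() {
		escalate(func(sig syscall.Signal) { fmt.Println("forwarding:", sig) }, time.Second)
		close(done)
	}()
	time.Sleep(100 * time.Millisecond) // let Notify register
	syscall.Kill(os.Getpid(), syscall.SIGINT)
	<-done
}
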
Code Example #10
File: postgres.go Project: devick/flynn
func (p *Postgres) start() error {
	log := p.log.New("fn", "start", "data_dir", p.dataDir, "bin_dir", p.binDir)
	log.Info("starting postgres")

	// clear stale pid if it exists
	os.Remove(filepath.Join(p.dataDir, "postmaster.pid"))

	p.expectExit.Store(false)
	p.daemonExit = make(chan struct{})

	cmd := exec.Command(p.binPath("postgres"), "-D", p.dataDir)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		log.Error("failed to start postgres", "err", err)
		return err
	}
	p.daemon = cmd
	p.setRunning(true)

	go func() {
		err := cmd.Wait()
		if !p.expectExit.Load().(bool) {
			p.log.Error("postgres unexpectedly exit", "err", err)
			shutdown.ExitWithCode(1)
		}
		close(p.daemonExit)
	}()

	log.Debug("waiting for postgres to start")
	startTime := time.Now().UTC()
	var err error
	for {
		if time.Since(startTime) > p.opTimeout {
			log.Error("timed out waiting for postgres to start", "err", err)
			if err := p.stop(); err != nil {
				log.Error("error stopping postgres", "err", err)
			}
			return err
		}

		port, _ := strconv.Atoi(p.port)
		c := pgx.ConnPoolConfig{
			ConnConfig: pgx.ConnConfig{
				Host: "127.0.0.1",
				User: "******",
				Port: uint16(port),
			},
		}
		p.dbMtx.Lock()
		p.db, err = pgx.NewConnPool(c)
		p.dbMtx.Unlock()
		if err == nil {
			_, err = p.db.Exec("SELECT 1")
			if err == nil {
				log.Info("postgres started")
				return nil
			}
		}

		log.Debug("ignoring error connecting to postgres", "err", err)
		time.Sleep(checkInterval)
	}
}
Code Example #11
File: host.go Project: ably-forks/flynn
func main() {
	// when starting a container with libcontainer, we first exec the
	// current binary with libcontainer-init as the first argument,
	// which triggers the following code to initialise the container
	// environment (namespaces, network etc.) then exec containerinit
	if len(os.Args) > 1 && os.Args[1] == "libcontainer-init" {
		runtime.GOMAXPROCS(1)
		runtime.LockOSThread()
		factory, _ := libcontainer.New("")
		if err := factory.StartInitialization(); err != nil {
			log.Fatal(err)
		}
	}

	defer shutdown.Exit()

	usage := `usage: flynn-host [-h|--help] [--version] <command> [<args>...]

Options:
  -h, --help                 Show this message
  --version                  Show current version

Commands:
  help                       Show usage for a specific command
  init                       Create cluster configuration for daemon
  daemon                     Start the daemon
  update                     Update Flynn components
  download                   Download container images
  bootstrap                  Bootstrap layer 1
  inspect                    Get low-level information about a job
  log                        Get the logs of a job
  ps                         List jobs
  stop                       Stop running jobs
  signal                     Signal a job
  destroy-volumes            Destroys the local volume database
  collect-debug-info         Collect debug information into an anonymous gist or tarball
  list                       Lists ID and IP of each host
  version                    Show current version
  fix                        Fix a broken cluster
  tags                       Manage flynn-host daemon tags
  discover                   Return low-level information about a service

See 'flynn-host help <command>' for more information on a specific command.
`

	args, _ := docopt.Parse(usage, nil, true, version.String(), true)
	cmd := args.String["<command>"]
	cmdArgs := args.All["<args>"].([]string)

	if cmd == "help" {
		if len(cmdArgs) == 0 { // `flynn-host help`
			fmt.Println(usage)
			return
		} else { // `flynn-host help <command>`
			cmd = cmdArgs[0]
			cmdArgs = []string{"--help"}
		}
	}

	if cmd == "daemon" {
		// merge in args and env from config file, if available
		var c *config.Config
		if n := os.Getenv("FLYNN_HOST_CONFIG"); n != "" {
			var err error
			c, err = config.Open(n)
			if err != nil {
				shutdown.Fatalf("error opening config file %s: %s", n, err)
			}
		} else {
			var err error
			c, err = config.Open(configFile)
			if err != nil && !os.IsNotExist(err) {
				shutdown.Fatalf("error opening config file %s: %s", configFile, err)
			}
			if c == nil {
				c = &config.Config{}
			}
		}
		cmdArgs = append(cmdArgs, c.Args...)
		for k, v := range c.Env {
			os.Setenv(k, v)
		}
	}

	if err := cli.Run(cmd, cmdArgs); err != nil {
		if err == cli.ErrInvalidCommand {
			fmt.Printf("ERROR: %q is not a valid command\n\n", cmd)
			fmt.Println(usage)
			shutdown.ExitWithCode(1)
		} else if _, ok := err.(cli.ErrAlreadyLogged); ok {
			shutdown.ExitWithCode(1)
		}
		shutdown.Fatal(err)
	}
}
Code Example #12
File: host.go Project: ably-forks/flynn
func runDaemon(args *docopt.Args) {
	hostname, _ := os.Hostname()
	httpPort := args.String["--http-port"]
	externalIP := args.String["--external-ip"]
	listenIP := args.String["--listen-ip"]
	stateFile := args.String["--state"]
	hostID := args.String["--id"]
	tags := parseTagArgs(args.String["--tags"])
	force := args.Bool["--force"]
	volPath := args.String["--volpath"]
	volProvider := args.String["--vol-provider"]
	backendName := args.String["--backend"]
	flynnInit := args.String["--flynn-init"]
	logDir := args.String["--log-dir"]
	discoveryToken := args.String["--discovery"]
	bridgeName := args.String["--bridge-name"]

	logger, err := setupLogger(logDir)
	if err != nil {
		shutdown.Fatalf("error setting up logger: %s", err)
	}

	var peerIPs []string
	if args.String["--peer-ips"] != "" {
		peerIPs = strings.Split(args.String["--peer-ips"], ",")
	}

	if hostID == "" {
		hostID = strings.Replace(hostname, "-", "", -1)
	}

	var maxJobConcurrency uint64 = 4
	if m, err := strconv.ParseUint(args.String["--max-job-concurrency"], 10, 64); err == nil {
		maxJobConcurrency = m
	}

	var partitionCGroups = make(map[string]int64) // name -> cpu shares
	for _, p := range strings.Split(args.String["--partitions"], " ") {
		nameShares := strings.Split(p, "=cpu_shares:")
		if len(nameShares) != 2 {
			shutdown.Fatalf("invalid partition specifier: %q", p)
		}
		shares, err := strconv.ParseInt(nameShares[1], 10, 64)
		if err != nil || shares < 2 {
			shutdown.Fatalf("invalid cpu shares specifier: %q", shares)
		}
		partitionCGroups[nameShares[0]] = shares
	}
	for _, s := range []string{"user", "system", "background"} {
		if _, ok := partitionCGroups[s]; !ok {
			shutdown.Fatalf("missing mandatory resource partition: %s", s)
		}
	}

	log := logger.New("fn", "runDaemon", "host.id", hostID)
	log.Info("starting daemon")

	log.Info("validating host ID")
	if strings.Contains(hostID, "-") {
		shutdown.Fatal("host id must not contain dashes")
	}
	if externalIP == "" {
		log.Info("detecting external IP")
		var err error
		externalIP, err = config.DefaultExternalIP()
		if err != nil {
			log.Error("error detecting external IP", "err", err)
			shutdown.Fatal(err)
		}
		log.Info("using external IP " + externalIP)
	}

	publishAddr := net.JoinHostPort(externalIP, httpPort)
	if discoveryToken != "" {
		// TODO: retry
		log.Info("registering with cluster discovery service", "token", discoveryToken, "addr", publishAddr, "name", hostID)
		discoveryID, err := discovery.RegisterInstance(discovery.Info{
			ClusterURL:  discoveryToken,
			InstanceURL: "http://" + publishAddr,
			Name:        hostID,
		})
		if err != nil {
			log.Error("error registering with cluster discovery service", "err", err)
			shutdown.Fatal(err)
		}
		log.Info("registered with cluster discovery service", "id", discoveryID)
	}

	state := NewState(hostID, stateFile)
	shutdown.BeforeExit(func() { state.CloseDB() })

	log.Info("initializing volume manager", "provider", volProvider)
	var newVolProvider func() (volume.Provider, error)
	switch volProvider {
	case "zfs":
		newVolProvider = func() (volume.Provider, error) {
			// use a zpool backing file size of either 70% of the device on which
			// volumes will reside, or 100GB if that can't be determined.
			log.Info("determining ZFS zpool size")
			var size int64
			var dev syscall.Statfs_t
			if err := syscall.Statfs(volPath, &dev); err == nil {
				size = (dev.Bsize * int64(dev.Blocks) * 7) / 10
			} else {
				size = 100000000000
			}
			log.Info(fmt.Sprintf("using ZFS zpool size %d", size))

			return zfsVolume.NewProvider(&zfsVolume.ProviderConfig{
				DatasetName: "flynn-default",
				Make: &zfsVolume.MakeDev{
					BackingFilename: filepath.Join(volPath, "zfs/vdev/flynn-default-zpool.vdev"),
					Size:            size,
				},
				WorkingDir: filepath.Join(volPath, "zfs"),
			})
		}
	case "mock":
		newVolProvider = func() (volume.Provider, error) { return nil, nil }
	default:
		shutdown.Fatalf("unknown volume provider: %q", volProvider)
	}
	vman := volumemanager.New(
		filepath.Join(volPath, "volumes.bolt"),
		newVolProvider,
	)
	shutdown.BeforeExit(func() { vman.CloseDB() })

	mux := logmux.New(hostID, logDir, logger.New("host.id", hostID, "component", "logmux"))

	log.Info("initializing job backend", "type", backendName)
	var backend Backend
	switch backendName {
	case "libcontainer":
		backend, err = NewLibcontainerBackend(state, vman, bridgeName, flynnInit, mux, partitionCGroups, logger.New("host.id", hostID, "component", "backend", "backend", "libcontainer"))
	case "mock":
		backend = MockBackend{}
	default:
		shutdown.Fatalf("unknown backend %q", backendName)
	}
	if err != nil {
		shutdown.Fatal(err)
	}
	backend.SetDefaultEnv("EXTERNAL_IP", externalIP)
	backend.SetDefaultEnv("LISTEN_IP", listenIP)

	var buffers host.LogBuffers
	discoverdManager := NewDiscoverdManager(backend, mux, hostID, publishAddr, tags)
	publishURL := "http://" + publishAddr
	host := &Host{
		id:  hostID,
		url: publishURL,
		status: &host.HostStatus{
			ID:      hostID,
			PID:     os.Getpid(),
			URL:     publishURL,
			Tags:    tags,
			Version: version.String(),
		},
		state:   state,
		backend: backend,
		vman:    vman,
		discMan: discoverdManager,
		log:     logger.New("host.id", hostID),

		maxJobConcurrency: maxJobConcurrency,
	}
	backend.SetHost(host)

	// restore the host status if set in the environment
	if statusEnv := os.Getenv("FLYNN_HOST_STATUS"); statusEnv != "" {
		log.Info("restoring host status from parent")
		if err := json.Unmarshal([]byte(statusEnv), &host.status); err != nil {
			log.Error("error restoring host status from parent", "err", err)
			shutdown.Fatal(err)
		}
		pid := os.Getpid()
		log.Info("setting status PID", "pid", pid)
		host.status.PID = pid
		// keep the same tags as the parent
		discoverdManager.UpdateTags(host.status.Tags)
	}

	log.Info("creating HTTP listener")
	l, err := newHTTPListener(net.JoinHostPort(listenIP, httpPort))
	if err != nil {
		log.Error("error creating HTTP listener", "err", err)
		shutdown.Fatal(err)
	}
	host.listener = l
	shutdown.BeforeExit(func() { host.Close() })

	// if we have a control socket FD, wait for a "resume" message before
	// opening state DBs and serving requests.
	var controlFD int
	if fdEnv := os.Getenv("FLYNN_CONTROL_FD"); fdEnv != "" {
		log.Info("parsing control socket file descriptor")
		controlFD, err = strconv.Atoi(fdEnv)
		if err != nil {
			log.Error("error parsing control socket file descriptor", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("waiting for resume message from parent")
		msg := make([]byte, len(ControlMsgResume))
		if _, err := syscall.Read(controlFD, msg); err != nil {
			log.Error("error waiting for resume message from parent", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("validating resume message")
		if !bytes.Equal(msg, ControlMsgResume) {
			log.Error(fmt.Sprintf("unexpected resume message from parent: %v", msg))
			shutdown.ExitWithCode(1)
		}

		log.Info("receiving log buffers from parent")
		if err := json.NewDecoder(&controlSock{controlFD}).Decode(&buffers); err != nil {
			log.Error("error receiving log buffers from parent", "err", err)
			shutdown.Fatal(err)
		}
	}

	log.Info("opening state databases")
	if err := host.OpenDBs(); err != nil {
		log.Error("error opening state databases", "err", err)
		shutdown.Fatal(err)
	}

	// stopJobs stops all jobs, leaving discoverd until the end so other
	// jobs can unregister themselves on shutdown.
	stopJobs := func() (err error) {
		var except []string
		host.statusMtx.RLock()
		if host.status.Discoverd != nil && host.status.Discoverd.JobID != "" {
			except = []string{host.status.Discoverd.JobID}
		}
		host.statusMtx.RUnlock()
		log.Info("stopping all jobs except discoverd")
		if err := backend.Cleanup(except); err != nil {
			log.Error("error stopping all jobs except discoverd", "err", err)
			return err
		}
		for _, id := range except {
			log.Info("stopping discoverd")
			if e := backend.Stop(id); e != nil {
				log.Error("error stopping discoverd", "err", err)
				err = e
			}
		}
		return
	}

	log.Info("restoring state")
	resurrect, err := state.Restore(backend, buffers)
	if err != nil {
		log.Error("error restoring state", "err", err)
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() {
		// close discoverd before stopping jobs so we can unregister first
		log.Info("unregistering with service discovery")
		if err := discoverdManager.Close(); err != nil {
			log.Error("error unregistering with service discovery", "err", err)
		}
		stopJobs()
	})

	log.Info("serving HTTP requests")
	host.ServeHTTP()

	if controlFD > 0 {
		// now that we are serving requests, send an "ok" message to the parent
		log.Info("sending ok message to parent")
		if _, err := syscall.Write(controlFD, ControlMsgOK); err != nil {
			log.Error("error sending ok message to parent", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("closing control socket")
		if err := syscall.Close(controlFD); err != nil {
			log.Error("error closing control socket", "err", err)
		}
	}

	if force {
		log.Info("forcibly stopping existing jobs")
		if err := stopJobs(); err != nil {
			log.Error("error forcibly stopping existing jobs", "err", err)
			shutdown.Fatal(err)
		}
	}

	if discoveryToken != "" {
		log.Info("getting cluster peer IPs")
		instances, err := discovery.GetCluster(discoveryToken)
		if err != nil {
			// TODO(titanous): retry?
			log.Error("error getting discovery cluster", "err", err)
			shutdown.Fatal(err)
		}
		peerIPs = make([]string, 0, len(instances))
		for _, inst := range instances {
			u, err := url.Parse(inst.URL)
			if err != nil {
				continue
			}
			ip, _, err := net.SplitHostPort(u.Host)
			if err != nil || ip == externalIP {
				continue
			}
			peerIPs = append(peerIPs, ip)
		}
		log.Info("got cluster peer IPs", "peers", peerIPs)
	}
	log.Info("connecting to cluster peers")
	if err := discoverdManager.ConnectPeer(peerIPs); err != nil {
		log.Info("no cluster peers available")
	}

	if !args.Bool["--no-resurrect"] {
		log.Info("resurrecting jobs")
		resurrect()
	}

	monitor := NewMonitor(host.discMan, externalIP, logger)
	shutdown.BeforeExit(func() { monitor.Shutdown() })
	go monitor.Run()

	log.Info("blocking main goroutine")
	<-make(chan struct{})
}
Code Example #13
File: run.go Project: josephwinston/flynn
func runJob(client *controller.Client, config runConfig) error {
	req := &ct.NewJob{
		Cmd:        config.Args,
		TTY:        config.Stdin == nil && config.Stdout == nil && term.IsTerminal(os.Stdin.Fd()) && term.IsTerminal(os.Stdout.Fd()) && !config.Detached,
		ReleaseID:  config.Release,
		Entrypoint: config.Entrypoint,
		Env:        config.Env,
		ReleaseEnv: config.ReleaseEnv,
		DisableLog: config.DisableLog,
	}
	if config.Stdin == nil {
		config.Stdin = os.Stdin
	}
	if config.Stdout == nil {
		config.Stdout = os.Stdout
	}
	if config.Stderr == nil {
		config.Stderr = os.Stderr
	}
	if req.TTY {
		if req.Env == nil {
			req.Env = make(map[string]string)
		}
		ws, err := term.GetWinsize(os.Stdin.Fd())
		if err != nil {
			return err
		}
		req.Columns = int(ws.Width)
		req.Lines = int(ws.Height)
		req.Env["COLUMNS"] = strconv.Itoa(int(ws.Width))
		req.Env["LINES"] = strconv.Itoa(int(ws.Height))
		req.Env["TERM"] = os.Getenv("TERM")
	}

	if config.Detached {
		job, err := client.RunJobDetached(config.App, req)
		if err != nil {
			return err
		}
		log.Println(job.ID)
		return nil
	}

	rwc, err := client.RunJobAttached(config.App, req)
	if err != nil {
		return err
	}
	defer rwc.Close()
	attachClient := cluster.NewAttachClient(rwc)

	var termState *term.State
	if req.TTY {
		termState, err = term.MakeRaw(os.Stdin.Fd())
		if err != nil {
			return err
		}
		// Restore the terminal if we return without calling os.Exit
		defer term.RestoreTerminal(os.Stdin.Fd(), termState)
		go func() {
			ch := make(chan os.Signal, 1)
			signal.Notify(ch, SIGWINCH)
			for range ch {
				ws, err := term.GetWinsize(os.Stdin.Fd())
				if err != nil {
					return
				}
				attachClient.ResizeTTY(ws.Height, ws.Width)
				attachClient.Signal(int(SIGWINCH))
			}
		}()
	}

	go func() {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
		sig := <-ch
		attachClient.Signal(int(sig.(syscall.Signal)))
		time.Sleep(10 * time.Second)
		attachClient.Signal(int(syscall.SIGKILL))
	}()

	go func() {
		io.Copy(attachClient, config.Stdin)
		attachClient.CloseWrite()
	}()

	childDone := make(chan struct{})
	shutdown.BeforeExit(func() {
		<-childDone
	})
	exitStatus, err := attachClient.Receive(config.Stdout, config.Stderr)
	close(childDone)
	if err != nil {
		return err
	}
	if req.TTY {
		term.RestoreTerminal(os.Stdin.Fd(), termState)
	}
	shutdown.ExitWithCode(exitStatus)

	panic("unreached")
}