Code example #1
File: main.go Project: johan--/flynn
func main() {
	grohl.AddContext("app", "controller-scheduler")
	grohl.Log(grohl.Data{"at": "start"})

	cc, err := controller.NewClient("", os.Getenv("AUTH_KEY"))
	if err != nil {
		log.Fatal(err)
	}
	cl, err := cluster.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	c := newContext(cc, cl)

	grohl.Log(grohl.Data{"at": "leaderwait"})
	leaderWait, err := discoverd.RegisterAndStandby("flynn-controller-scheduler", ":"+os.Getenv("PORT"), nil)
	if err != nil {
		log.Fatal(err)
	}
	<-leaderWait
	grohl.Log(grohl.Data{"at": "leader"})

	// TODO: periodic full cluster sync for anti-entropy
	c.watchFormations(nil, nil)
}
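
All of the examples on this page share the same grohl pattern: grohl.AddContext sets process-wide key/value pairs, grohl.Log emits a structured line from a grohl.Data map, and grohl.NewContext returns a logger that carries extra keys for a group of calls. Below is a minimal, self-contained sketch of that pattern; the import path assumes the upstream github.com/technoweenie/grohl library rather than the vendored copy used in these projects, and the key values are illustrative.

package main

// Assumed upstream import path; Flynn vendors this library under its own tree.
import "github.com/technoweenie/grohl"

func main() {
	// Every line logged after this call carries app=example.
	grohl.AddContext("app", "example")
	grohl.Log(grohl.Data{"at": "start"})

	// A context bundles additional keys (here fn=worker) for a group of calls.
	g := grohl.NewContext(grohl.Data{"fn": "worker"})
	g.Log(grohl.Data{"at": "begin"})
	g.Log(grohl.Data{"at": "finish", "status": "ok"})
}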
Code example #2
File: main.go Project: ericcapricorn/flynn
func main() {
	grohl.AddContext("app", "controller-scheduler")
	grohl.Log(grohl.Data{"at": "start"})

	if period := os.Getenv("BACKOFF_PERIOD"); period != "" {
		var err error
		backoffPeriod, err = time.ParseDuration(period)
		if err != nil {
			log.Fatal(err)
		}
		grohl.Log(grohl.Data{"at": "backoff_period", "period": backoffPeriod.String()})
	}

	cc, err := controller.NewClient("", os.Getenv("AUTH_KEY"))
	if err != nil {
		log.Fatal(err)
	}
	cl, err := cluster.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	c := newContext(cc, cl)

	grohl.Log(grohl.Data{"at": "leaderwait"})
	leaderWait, err := discoverd.RegisterAndStandby("flynn-controller-scheduler", ":"+os.Getenv("PORT"), nil)
	if err != nil {
		log.Fatal(err)
	}
	<-leaderWait
	grohl.Log(grohl.Data{"at": "leader"})

	// TODO: periodic full cluster sync for anti-entropy
	c.watchFormations()
}
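
Code example #2 above reads an optional BACKOFF_PERIOD environment variable and parses it with time.ParseDuration. A standalone sketch of that configuration step using only the standard library; the 10-second default is illustrative and not taken from the project.

package main

import (
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	// Illustrative default used when the environment variable is unset.
	backoffPeriod := 10 * time.Second
	if period := os.Getenv("BACKOFF_PERIOD"); period != "" {
		var err error
		// Accepts values such as "30s", "5m" or "1h30m".
		backoffPeriod, err = time.ParseDuration(period)
		if err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("backoff period:", backoffPeriod)
}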
Code example #3
File: host.go Project: nightscape/flynn
func syncScheduler(scheduler sampiSyncClient, events <-chan host.Event) {
	for event := range events {
		if event.Event != "stop" {
			continue
		}
		grohl.Log(grohl.Data{"fn": "scheduler_event", "at": "remove_job", "job.id": event.JobID})
		if err := scheduler.RemoveJobs([]string{event.JobID}); err != nil {
			grohl.Log(grohl.Data{"fn": "scheduler_event", "at": "remove_job", "status": "error", "err": err, "job.id": event.JobID})
		}
	}
}
Code example #4
File: main.go Project: kuntenz/flynn
func main() {
	defer shutdown.Exit()

	grohl.AddContext("app", "controller-scheduler")
	grohl.Log(grohl.Data{"at": "start"})

	go startHTTPServer()

	if period := os.Getenv("BACKOFF_PERIOD"); period != "" {
		var err error
		backoffPeriod, err = time.ParseDuration(period)
		if err != nil {
			shutdown.Fatal(err)
		}
		grohl.Log(grohl.Data{"at": "backoff_period", "period": backoffPeriod.String()})
	}

	cc, err := controller.NewClient("", os.Getenv("AUTH_KEY"))
	if err != nil {
		shutdown.Fatal(err)
	}
	c := newContext(cc, cluster.NewClient())

	c.watchHosts()

	grohl.Log(grohl.Data{"at": "leaderwait"})
	hb, err := discoverd.AddServiceAndRegister("controller-scheduler", ":"+os.Getenv("PORT"))
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	leaders := make(chan *discoverd.Instance)
	stream, err := discoverd.NewService("controller-scheduler").Leaders(leaders)
	if err != nil {
		shutdown.Fatal(err)
	}
	for leader := range leaders {
		if leader.Addr == hb.Addr() {
			break
		}
	}
	if err := stream.Err(); err != nil {
		// TODO: handle discoverd errors
		shutdown.Fatal(err)
	}
	stream.Close()
	// TODO: handle demotion

	grohl.Log(grohl.Data{"at": "leader"})

	// TODO: periodic full cluster sync for anti-entropy
	c.watchFormations()
}
Code example #5
File: host.go Project: snormore/flynn
func (h *shutdownHandler) wait() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, os.Signal(syscall.SIGTERM))
	sig := <-ch
	grohl.Log(grohl.Data{"fn": "shutdown", "at": "start", "signal": fmt.Sprint(sig)})
	h.shutdown(nil)
}
Code example #6
File: host.go Project: nightscape/flynn
func (h *shutdownHandler) wait() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, os.Signal(syscall.SIGTERM))
	sig := <-ch
	grohl.Log(grohl.Data{"fn": "shutdown", "at": "start", "signal": fmt.Sprint(sig)})
	// signal exit handlers
	close(h.done)
	// wait for exit handlers to finish
	h.mtx.Lock()
	os.Exit(0)
}
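
Code examples #5 and #6 block on signal.Notify until SIGINT or SIGTERM arrives, then run shutdown hooks. A self-contained sketch of that signal-wait pattern using only the standard library; the cleanup function is a stand-in for the handler's real exit hooks.

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ch := make(chan os.Signal, 1)
	// Deliver SIGINT (os.Interrupt) and SIGTERM to the channel.
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)

	// Block until one of the signals arrives.
	sig := <-ch
	fmt.Println("shutting down after signal:", sig)

	// Run cleanup before exiting; stands in for the handler's exit hooks.
	cleanup()
	os.Exit(0)
}

func cleanup() {
	fmt.Println("running exit handlers")
}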
Code example #7
File: service_conn.go Project: josephwinston/flynn
func connect(discd *discoverd.Client, name string, donec chan struct{}) (*serviceConn, error) {
	srv := discd.Service(name)
	eventc := make(chan *discoverd.Event)

	stream, err := srv.Watch(eventc)
	if err != nil {
		return nil, err
	}

	sc := &serviceConn{
		cond:   &sync.Cond{L: &sync.Mutex{}},
		donec:  donec,
		closec: make(chan struct{}),
		errc:   make(chan error),
	}

	if err := sc.connect(srv); err != nil {
		grohl.Log(grohl.Data{"service": name, "status": "connect-error", "err": err.Error()})
	}

	go sc.watch(srv, eventc, stream)
	return sc, nil
}
Code example #8
File: host.go Project: justintung/flynn
func runDaemon(args *docopt.Args) {
	hostname, _ := os.Hostname()
	externalIP := args.String["--external-ip"]
	stateFile := args.String["--state"]
	hostID := args.String["--id"]
	force := args.Bool["--force"]
	volPath := args.String["--volpath"]
	backendName := args.String["--backend"]
	flynnInit := args.String["--flynn-init"]
	nsumount := args.String["--nsumount"]
	logDir := args.String["--log-dir"]
	discoveryToken := args.String["--discovery"]

	var peerIPs []string
	if args.String["--peer-ips"] != "" {
		peerIPs = strings.Split(args.String["--peer-ips"], ",")
	}

	grohl.AddContext("app", "host")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})

	if hostID == "" {
		hostID = strings.Replace(hostname, "-", "", -1)
	}
	if strings.Contains(hostID, "-") {
		shutdown.Fatal("host id must not contain dashes")
	}
	if externalIP == "" {
		var err error
		externalIP, err = config.DefaultExternalIP()
		if err != nil {
			shutdown.Fatal(err)
		}
	}

	publishAddr := net.JoinHostPort(externalIP, "1113")
	if discoveryToken != "" {
		// TODO: retry
		discoveryID, err := discovery.RegisterInstance(discovery.Info{
			ClusterURL:  discoveryToken,
			InstanceURL: "http://" + publishAddr,
			Name:        hostID,
		})
		if err != nil {
			g.Log(grohl.Data{"at": "register_discovery", "status": "error", "err": err.Error()})
			shutdown.Fatal(err)
		}
		g.Log(grohl.Data{"at": "register_discovery", "id": discoveryID})
	}

	state := NewState(hostID, stateFile)
	var backend Backend
	var err error

	// create volume manager
	vman, err := volumemanager.New(
		filepath.Join(volPath, "volumes.bolt"),
		func() (volume.Provider, error) {
			// use a zpool backing file size of either 70% of the device on which
			// volumes will reside, or 100GB if that can't be determined.
			var size int64
			var dev syscall.Statfs_t
			if err := syscall.Statfs(volPath, &dev); err == nil {
				size = (dev.Bsize * int64(dev.Blocks) * 7) / 10
			} else {
				size = 100000000000
			}
			g.Log(grohl.Data{"at": "zpool_size", "size": size})

			return zfsVolume.NewProvider(&zfsVolume.ProviderConfig{
				DatasetName: "flynn-default",
				Make: &zfsVolume.MakeDev{
					BackingFilename: filepath.Join(volPath, "zfs/vdev/flynn-default-zpool.vdev"),
					Size:            size,
				},
				WorkingDir: filepath.Join(volPath, "zfs"),
			})
		},
	)
	if err != nil {
		shutdown.Fatal(err)
	}

	mux := logmux.New(1000)
	shutdown.BeforeExit(func() { mux.Close() })

	switch backendName {
	case "libvirt-lxc":
		backend, err = NewLibvirtLXCBackend(state, vman, logDir, flynnInit, nsumount, mux)
	default:
		log.Fatalf("unknown backend %q", backendName)
	}
	if err != nil {
		shutdown.Fatal(err)
	}
	backend.SetDefaultEnv("EXTERNAL_IP", externalIP)

	discoverdManager := NewDiscoverdManager(backend, mux, hostID, publishAddr)
	publishURL := "http://" + publishAddr
	host := &Host{
		id:      hostID,
		url:     publishURL,
		state:   state,
		backend: backend,
		status:  &host.HostStatus{ID: hostID, URL: publishURL},
	}

	// stopJobs stops all jobs, leaving discoverd until the end so other
	// jobs can unregister themselves on shutdown.
	stopJobs := func() (err error) {
		var except []string
		host.statusMtx.RLock()
		if host.status.Discoverd != nil && host.status.Discoverd.JobID != "" {
			except = []string{host.status.Discoverd.JobID}
		}
		host.statusMtx.RUnlock()
		if err := backend.Cleanup(except); err != nil {
			return err
		}
		for _, id := range except {
			if e := backend.Stop(id); e != nil {
				err = e
			}
		}
		return
	}

	resurrect, err := state.Restore(backend)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() {
		// close discoverd before stopping jobs so we can unregister first
		discoverdManager.Close()
		stopJobs()
	})
	shutdown.BeforeExit(func() {
		if err := state.MarkForResurrection(); err != nil {
			log.Print("error marking for resurrection", err)
		}
	})

	if err := serveHTTP(
		host,
		&attachHandler{state: state, backend: backend},
		cluster.NewClient(),
		vman,
		discoverdManager.ConnectLocal,
	); err != nil {
		shutdown.Fatal(err)
	}

	if force {
		if err := stopJobs(); err != nil {
			shutdown.Fatal(err)
		}
	}

	if discoveryToken != "" {
		instances, err := discovery.GetCluster(discoveryToken)
		if err != nil {
			// TODO(titanous): retry?
			shutdown.Fatal(err)
		}
		peerIPs = make([]string, 0, len(instances))
		for _, inst := range instances {
			u, err := url.Parse(inst.URL)
			if err != nil {
				continue
			}
			ip, _, err := net.SplitHostPort(u.Host)
			if err != nil || ip == externalIP {
				continue
			}
			peerIPs = append(peerIPs, ip)
		}
	}
	if err := discoverdManager.ConnectPeer(peerIPs); err != nil {
		// No peers have working discoverd, so resurrect any available jobs
		resurrect()
	}

	<-make(chan struct{})
}
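
Code example #8 sizes the zpool backing file at 70% of the filesystem holding the volume path, falling back to 100GB when the filesystem statistics cannot be read. A standalone sketch of that sizing calculation; it compiles on Linux only (syscall.Statfs field types differ per platform) and the path is a placeholder.

package main

import (
	"fmt"
	"syscall"
)

// zpoolSize returns 70% of the capacity of the filesystem containing path,
// or 100GB if the filesystem statistics cannot be read.
func zpoolSize(path string) int64 {
	var dev syscall.Statfs_t
	if err := syscall.Statfs(path, &dev); err != nil {
		return 100000000000
	}
	return (dev.Bsize * int64(dev.Blocks) * 7) / 10
}

func main() {
	// Placeholder path for illustration.
	fmt.Println("zpool backing file size:", zpoolSize("/var/lib/flynn-host"))
}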
Code example #9
// ConfigureNetworking is called once during host startup and passed the
// strategy and identifier of the networking coordinator job. Currently the
// only strategy implemented uses flannel.
func (l *LibvirtLXCBackend) ConfigureNetworking(config *host.NetworkConfig) error {
	var err error
	l.bridgeAddr, l.bridgeNet, err = net.ParseCIDR(config.Subnet)
	if err != nil {
		return err
	}
	l.ipalloc.RequestIP(l.bridgeNet, l.bridgeAddr)

	err = netlink.CreateBridge(bridgeName, false)
	bridgeExists := os.IsExist(err)
	if err != nil && !bridgeExists {
		return err
	}

	bridge, err := net.InterfaceByName(bridgeName)
	if err != nil {
		return err
	}
	if !bridgeExists {
		// We need to explicitly assign the MAC address to avoid it changing to a lower value
		// See: https://github.com/flynn/flynn/issues/223
		b := random.Bytes(5)
		bridgeMAC := fmt.Sprintf("fe:%02x:%02x:%02x:%02x:%02x", b[0], b[1], b[2], b[3], b[4])
		if err := netlink.NetworkSetMacAddress(bridge, bridgeMAC); err != nil {
			return err
		}
	}
	currAddrs, err := bridge.Addrs()
	if err != nil {
		return err
	}
	setIP := true
	for _, addr := range currAddrs {
		ip, net, _ := net.ParseCIDR(addr.String())
		if ip.Equal(l.bridgeAddr) && net.String() == l.bridgeNet.String() {
			setIP = false
		} else {
			if err := netlink.NetworkLinkDelIp(bridge, ip, net); err != nil {
				return err
			}
		}
	}
	if setIP {
		if err := netlink.NetworkLinkAddIp(bridge, l.bridgeAddr, l.bridgeNet); err != nil {
			return err
		}
	}
	if err := netlink.NetworkLinkUp(bridge); err != nil {
		return err
	}

	network, err := l.libvirt.LookupNetworkByName(libvirtNetName)
	if err != nil {
		// network doesn't exist
		networkConfig := &lt.Network{
			Name:    libvirtNetName,
			Bridge:  lt.Bridge{Name: bridgeName},
			Forward: lt.Forward{Mode: "bridge"},
		}
		network, err = l.libvirt.NetworkDefineXML(string(networkConfig.XML()))
		if err != nil {
			return err
		}
	}
	active, err := network.IsActive()
	if err != nil {
		return err
	}
	if !active {
		if err := network.Create(); err != nil {
			return err
		}
	}
	if defaultNet, err := l.libvirt.LookupNetworkByName("default"); err == nil {
		// The default network causes dnsmasq to run and bind to all interfaces,
		// including ours. This prevents discoverd from binding its DNS server.
		// We don't use it, so destroy it if it exists.
		defaultNet.Destroy()
	}

	// enable IP forwarding
	if err := ioutil.WriteFile("/proc/sys/net/ipv4/ip_forward", []byte("1\n"), 0644); err != nil {
		return err
	}

	// Set up iptables for outbound traffic masquerading from containers to the
	// rest of the network.
	if err := iptables.EnableOutboundNAT(bridgeName, l.bridgeNet.String()); err != nil {
		return err
	}

	// Read DNS config, discoverd uses the nameservers
	dnsConf, err := dns.ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		return err
	}
	config.Resolvers = dnsConf.Servers

	// Write a resolv.conf to be bind-mounted into containers pointing at the
	// future discoverd DNS listener
	if err := os.MkdirAll("/etc/flynn", 0755); err != nil {
		return err
	}
	var resolvSearch string
	if len(dnsConf.Search) > 0 {
		resolvSearch = fmt.Sprintf("search %s\n", strings.Join(dnsConf.Search, " "))
	}
	if err := ioutil.WriteFile("/etc/flynn/resolv.conf", []byte(fmt.Sprintf("%snameserver %s\n", resolvSearch, l.bridgeAddr.String())), 0644); err != nil {
		return err
	}
	l.resolvConf = "/etc/flynn/resolv.conf"

	// Allocate IPs for running jobs
	for i, container := range l.containers {
		if !container.job.Config.HostNetwork {
			var err error
			l.containers[i].IP, err = l.ipalloc.RequestIP(l.bridgeNet, container.IP)
			if err != nil {
				grohl.Log(grohl.Data{"fn": "ConfigureNetworking", "at": "request_ip", "status": "error", "err": err})
			}
		}
	}

	close(l.networkConfigured)

	return nil
}
Code example #10
File: host.go Project: nightscape/flynn
func main() {
	hostname, _ := os.Hostname()
	externalAddr := flag.String("external", "", "external IP of host")
	bindAddr := flag.String("bind", "", "bind containers to this IP")
	configFile := flag.String("config", "", "configuration file")
	manifestFile := flag.String("manifest", "/etc/flynn-host.json", "manifest file")
	stateFile := flag.String("state", "", "state file")
	hostID := flag.String("id", strings.Replace(hostname, "-", "", -1), "host id")
	force := flag.Bool("force", false, "kill all containers booted by flynn-host before starting")
	volPath := flag.String("volpath", "/var/lib/flynn-host", "directory to create volumes in")
	backendName := flag.String("backend", "libvirt-lxc", "runner backend (docker or libvirt-lxc)")
	flynnInit := flag.String("flynn-init", "/usr/bin/flynn-init", "path to flynn-init binary")
	metadata := make(MetaFlag)
	flag.Var(&metadata, "meta", "key=value pair to add as metadata")
	flag.Parse()
	grohl.AddContext("app", "host")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})

	if strings.Contains(*hostID, "-") {
		log.Fatal("host id must not contain dashes")
	}

	portAlloc := map[string]*ports.Allocator{
		"tcp": ports.NewAllocator(55000, 65535),
		"udp": ports.NewAllocator(55000, 65535),
	}

	sh := newShutdownHandler()
	state := NewState()
	var backend Backend
	var err error

	switch *backendName {
	case "libvirt-lxc":
		backend, err = NewLibvirtLXCBackend(state, portAlloc, *volPath, "/tmp/flynn-host-logs", *flynnInit)
	case "docker":
		backend, err = NewDockerBackend(state, portAlloc, *bindAddr)
	default:
		log.Fatalf("unknown backend %q", *backendName)
	}
	if err != nil {
		sh.Fatal(err)
	}

	if err := serveHTTP(&Host{state: state, backend: backend}, &attachHandler{state: state, backend: backend}, sh); err != nil {
		sh.Fatal(err)
	}

	if *stateFile != "" {
		sh.BeforeExit(func() { os.Remove(*stateFile) })
		if err := state.Restore(*stateFile, backend); err != nil {
			sh.Fatal(err)
		}
	}

	sh.BeforeExit(func() { backend.Cleanup() })

	if *force {
		if err := backend.Cleanup(); err != nil {
			sh.Fatal(err)
		}
	}

	runner := &manifestRunner{
		env:          parseEnviron(),
		externalAddr: *externalAddr,
		bindAddr:     *bindAddr,
		backend:      backend,
		state:        state,
		ports:        portAlloc,
	}

	discAddr := os.Getenv("DISCOVERD")
	var disc *discoverd.Client
	if *manifestFile != "" {
		var r io.Reader
		var f *os.File
		if *manifestFile == "-" {
			r = os.Stdin
		} else {
			f, err = os.Open(*manifestFile)
			if err != nil {
				sh.Fatal(err)
			}
			r = f
		}
		services, err := runner.runManifest(r)
		if err != nil {
			sh.Fatal(err)
		}
		if f != nil {
			f.Close()
		}

		if d, ok := services["discoverd"]; ok {
			discAddr = fmt.Sprintf("%s:%d", d.InternalIP, d.TCPPorts[0])
			var disc *discoverd.Client
			err = Attempts.Run(func() (err error) {
				disc, err = discoverd.NewClientWithAddr(discAddr)
				return
			})
			if err != nil {
				sh.Fatal(err)
			}
		}
	}

	if discAddr == "" && *externalAddr != "" {
		discAddr = *externalAddr + ":1111"
	}
	// HACK: use env as global for discoverd connection in sampic
	os.Setenv("DISCOVERD", discAddr)
	if disc == nil {
		disc, err = discoverd.NewClientWithAddr(discAddr)
		if err != nil {
			sh.Fatal(err)
		}
	}
	sh.BeforeExit(func() { disc.UnregisterAll() })
	sampiStandby, err := disc.RegisterAndStandby("flynn-host", *externalAddr+":1113", map[string]string{"id": *hostID})
	if err != nil {
		sh.Fatal(err)
	}

	// Check if we are the leader so that we can use the cluster functions directly
	sampiCluster := sampi.NewCluster(sampi.NewState())
	select {
	case <-sampiStandby:
		g.Log(grohl.Data{"at": "sampi_leader"})
		rpc.Register(sampiCluster)
	case <-time.After(5 * time.Millisecond):
		go func() {
			<-sampiStandby
			g.Log(grohl.Data{"at": "sampi_leader"})
			rpc.Register(sampiCluster)
		}()
	}
	cluster, err := cluster.NewClientWithSelf(*hostID, NewLocalClient(*hostID, sampiCluster))
	if err != nil {
		sh.Fatal(err)
	}
	sh.BeforeExit(func() { cluster.Close() })

	g.Log(grohl.Data{"at": "sampi_connected"})

	events := state.AddListener("all")
	go syncScheduler(cluster, events)

	h := &host.Host{}
	if *configFile != "" {
		h, err = openConfig(*configFile)
		if err != nil {
			sh.Fatal(err)
		}
	}
	if h.Metadata == nil {
		h.Metadata = make(map[string]string)
	}
	for k, v := range metadata {
		h.Metadata[k] = v
	}
	h.ID = *hostID

	for {
		newLeader := cluster.NewLeaderSignal()

		h.Jobs = state.ClusterJobs()
		jobs := make(chan *host.Job)
		hostErr := cluster.RegisterHost(h, jobs)
		g.Log(grohl.Data{"at": "host_registered"})
		for job := range jobs {
			if *externalAddr != "" {
				if job.Config.Env == nil {
					job.Config.Env = make(map[string]string)
				}
				job.Config.Env["EXTERNAL_IP"] = *externalAddr
				job.Config.Env["DISCOVERD"] = discAddr
			}
			if err := backend.Run(job); err != nil {
				state.SetStatusFailed(job.ID, err)
			}
		}
		g.Log(grohl.Data{"at": "sampi_disconnected", "err": *hostErr})

		<-newLeader
	}
}
Code example #11
File: host.go Project: snormore/flynn
func runDaemon(args *docopt.Args) {
	hostname, _ := os.Hostname()
	externalAddr := args.String["--external"]
	bindAddr := args.String["--bind"]
	configFile := args.String["--config"]
	manifestFile := args.String["--manifest"]
	stateFile := args.String["--state"]
	hostID := args.String["--id"]
	force := args.Bool["--force"]
	volPath := args.String["--volpath"]
	backendName := args.String["--backend"]
	flynnInit := args.String["--flynn-init"]
	metadata := args.All["--meta"].([]string)

	grohl.AddContext("app", "host")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})

	if hostID == "" {
		hostID = strings.Replace(hostname, "-", "", -1)
	}
	if strings.Contains(hostID, "-") {
		log.Fatal("host id must not contain dashes")
	}

	portAlloc := map[string]*ports.Allocator{
		"tcp": ports.NewAllocator(55000, 65535),
		"udp": ports.NewAllocator(55000, 65535),
	}

	sh := newShutdownHandler()
	state := NewState()
	var backend Backend
	var err error

	switch backendName {
	case "libvirt-lxc":
		backend, err = NewLibvirtLXCBackend(state, portAlloc, volPath, "/tmp/flynn-host-logs", flynnInit)
	case "docker":
		backend, err = NewDockerBackend(state, portAlloc, bindAddr)
	default:
		log.Fatalf("unknown backend %q", backendName)
	}
	if err != nil {
		sh.Fatal(err)
	}

	if err := serveHTTP(&Host{state: state, backend: backend}, &attachHandler{state: state, backend: backend}, sh); err != nil {
		sh.Fatal(err)
	}

	if stateFile != "" {
		if err := state.Restore(stateFile, backend); err != nil {
			sh.Fatal(err)
		}
	}

	var jobStream cluster.Stream
	sh.BeforeExit(func() {
		if jobStream != nil {
			jobStream.Close()
		}
		backend.Cleanup()
	})

	if force {
		if err := backend.Cleanup(); err != nil {
			sh.Fatal(err)
		}
	}

	runner := &manifestRunner{
		env:          parseEnviron(),
		externalAddr: externalAddr,
		bindAddr:     bindAddr,
		backend:      backend,
		state:        state,
		ports:        portAlloc,
	}

	discAddr := os.Getenv("DISCOVERD")
	var disc *discoverd.Client
	if manifestFile != "" {
		var r io.Reader
		var f *os.File
		if manifestFile == "-" {
			r = os.Stdin
		} else {
			f, err = os.Open(manifestFile)
			if err != nil {
				sh.Fatal(err)
			}
			r = f
		}
		services, err := runner.runManifest(r)
		if err != nil {
			sh.Fatal(err)
		}
		if f != nil {
			f.Close()
		}

		if d, ok := services["discoverd"]; ok {
			discAddr = fmt.Sprintf("%s:%d", d.InternalIP, d.TCPPorts[0])
			var disc *discoverd.Client
			err = Attempts.Run(func() (err error) {
				disc, err = discoverd.NewClientWithAddr(discAddr)
				return
			})
			if err != nil {
				sh.Fatal(err)
			}
		}
	}

	if discAddr == "" && externalAddr != "" {
		discAddr = externalAddr + ":1111"
	}
	// HACK: use env as global for discoverd connection in sampic
	os.Setenv("DISCOVERD", discAddr)
	if disc == nil {
		disc, err = discoverd.NewClientWithAddr(discAddr)
		if err != nil {
			sh.Fatal(err)
		}
	}
	sh.BeforeExit(func() { disc.UnregisterAll() })
	sampiStandby, err := disc.RegisterAndStandby("flynn-host", externalAddr+":1113", map[string]string{"id": hostID})
	if err != nil {
		sh.Fatal(err)
	}

	// Check if we are the leader so that we can use the cluster functions directly
	sampiCluster := sampi.NewCluster(sampi.NewState())
	select {
	case <-sampiStandby:
		g.Log(grohl.Data{"at": "sampi_leader"})
		rpc.Register(sampiCluster)
	case <-time.After(5 * time.Millisecond):
		go func() {
			<-sampiStandby
			g.Log(grohl.Data{"at": "sampi_leader"})
			rpc.Register(sampiCluster)
		}()
	}
	cluster, err := cluster.NewClientWithSelf(hostID, NewLocalClient(hostID, sampiCluster))
	if err != nil {
		sh.Fatal(err)
	}
	sh.BeforeExit(func() { cluster.Close() })

	g.Log(grohl.Data{"at": "sampi_connected"})

	events := state.AddListener("all")
	go syncScheduler(cluster, events)

	h := &host.Host{}
	if configFile != "" {
		h, err = openConfig(configFile)
		if err != nil {
			sh.Fatal(err)
		}
	}
	if h.Metadata == nil {
		h.Metadata = make(map[string]string)
	}
	for _, s := range metadata {
		kv := strings.SplitN(s, "=", 2)
		h.Metadata[kv[0]] = kv[1]
	}
	h.ID = hostID

	for {
		newLeader := cluster.NewLeaderSignal()

		h.Jobs = state.ClusterJobs()
		jobs := make(chan *host.Job)
		jobStream = cluster.RegisterHost(h, jobs)
		g.Log(grohl.Data{"at": "host_registered"})
		for job := range jobs {
			if externalAddr != "" {
				if job.Config.Env == nil {
					job.Config.Env = make(map[string]string)
				}
				job.Config.Env["EXTERNAL_IP"] = externalAddr
				job.Config.Env["DISCOVERD"] = discAddr
			}
			if err := backend.Run(job); err != nil {
				state.SetStatusFailed(job.ID, err)
			}
		}
		g.Log(grohl.Data{"at": "sampi_disconnected", "err": jobStream.Err})

		// if the process is shutting down, just block
		if sh.Active {
			<-make(chan struct{})
		}

		<-newLeader
	}
}
Code example #12
File: lorne.go Project: upton/flynn
func main() {
	hostname, _ := os.Hostname()
	externalAddr := flag.String("external", "", "external IP of host")
	bindAddr := flag.String("bind", "", "bind containers to this IP")
	configFile := flag.String("config", "", "configuration file")
	manifestFile := flag.String("manifest", "/etc/flynn-host.json", "manifest file")
	hostID := flag.String("id", hostname, "host id")
	force := flag.Bool("force", false, "kill all containers booted by flynn-host before starting")
	attributes := make(AttributeFlag)
	flag.Var(&attributes, "attribute", "key=value pair to add as an attribute")
	flag.Parse()
	grohl.AddContext("app", "lorne")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})

	state := NewState()
	backend, err := NewDockerBackend(state, *bindAddr)
	if err != nil {
		log.Fatal(err)
	}

	go serveHTTP(&Host{state: state, backend: backend}, &attachHandler{state: state, backend: backend})

	if *force {
		if err := backend.Cleanup(); err != nil {
			log.Fatal(err)
		}
	}

	runner := &manifestRunner{
		env:          parseEnviron(),
		externalAddr: *externalAddr,
		bindAddr:     *bindAddr,
		backend:      backend,
	}

	discAddr := os.Getenv("DISCOVERD")
	var disc *discoverd.Client
	if *manifestFile != "" {
		var r io.Reader
		var f *os.File
		if *manifestFile == "-" {
			r = os.Stdin
		} else {
			f, err = os.Open(*manifestFile)
			if err != nil {
				log.Fatal(err)
			}
			r = f
		}
		services, err := runner.runManifest(r)
		if err != nil {
			log.Fatal(err)
		}
		if f != nil {
			f.Close()
		}

		if d, ok := services["discoverd"]; ok {
			discAddr = fmt.Sprintf("%s:%d", d.InternalIP, d.TCPPorts[0])
			var disc *discoverd.Client
			err = Attempts.Run(func() (err error) {
				disc, err = discoverd.NewClientWithAddr(discAddr)
				return
			})
			if err != nil {
				log.Fatal(err)
			}
		}
	}

	if discAddr == "" && *externalAddr != "" {
		discAddr = *externalAddr + ":1111"
	}
	// HACK: use env as global for discoverd connection in sampic
	os.Setenv("DISCOVERD", discAddr)
	if disc == nil {
		disc, err = discoverd.NewClientWithAddr(discAddr)
		if err != nil {
			log.Fatal(err)
		}
	}
	sampiStandby, err := disc.RegisterAndStandby("flynn-host", *externalAddr+":1113", map[string]string{"id": *hostID})
	if err != nil {
		log.Fatal(err)
	}

	// Check if we are the leader so that we can use the cluster functions directly
	sampiCluster := sampi.NewCluster(sampi.NewState())
	select {
	case <-sampiStandby:
		g.Log(grohl.Data{"at": "sampi_leader"})
		rpc.Register(sampiCluster)
	case <-time.After(5 * time.Millisecond):
		go func() {
			<-sampiStandby
			g.Log(grohl.Data{"at": "sampi_leader"})
			rpc.Register(sampiCluster)
		}()
	}
	cluster, err := cluster.NewClientWithSelf(*hostID, NewLocalClient(*hostID, sampiCluster))
	if err != nil {
		log.Fatal(err)
	}

	g.Log(grohl.Data{"at": "sampi_connected"})

	events := make(chan host.Event)
	state.AddListener("all", events)
	go syncScheduler(cluster, events)

	h := &host.Host{}
	if *configFile != "" {
		h, err = openConfig(*configFile)
		if err != nil {
			log.Fatal(err)
		}
	}
	if h.Attributes == nil {
		h.Attributes = make(map[string]string)
	}
	for k, v := range attributes {
		h.Attributes[k] = v
	}
	h.ID = *hostID

	for {
		newLeader := cluster.NewLeaderSignal()

		h.Jobs = state.ClusterJobs()
		jobs := make(chan *host.Job)
		hostErr := cluster.RegisterHost(h, jobs)
		g.Log(grohl.Data{"at": "host_registered"})
		for job := range jobs {
			if *externalAddr != "" {
				job.Config.Env = appendUnique(job.Config.Env, "EXTERNAL_IP="+*externalAddr, "DISCOVERD="+discAddr)
			}
			backend.Run(job)
		}
		g.Log(grohl.Data{"at": "sampi_disconnected", "err": *hostErr})

		<-newLeader
	}
}