Code Example #1
File: runner.go  Project: ericcapricorn/flynn
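The start method of Flynn's CI runner: it validates required credentials, builds a Flynn root filesystem for test clusters, opens a BoltDB build store, primes a build-concurrency semaphore, and serves the CI web interface over TLS.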
func (r *Runner) start() error {
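	// Required credentials come from the environment; fail fast if either is missing.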
	r.authKey = os.Getenv("AUTH_KEY")
	if r.authKey == "" {
		return errors.New("AUTH_KEY not set")
	}

	r.githubToken = os.Getenv("GITHUB_TOKEN")
	if r.githubToken == "" {
		return errors.New("GITHUB_TOKEN not set")
	}

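	// Build logs are uploaded to an S3 bucket using AWS credentials from the environment.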
	awsAuth, err := aws.EnvAuth()
	if err != nil {
		return err
	}
	r.s3Bucket = s3.New(awsAuth, aws.USEast).Bucket(logBucket)

	_, listenPort, err = net.SplitHostPort(args.ListenAddr)
	if err != nil {
		return err
	}

	sh := shutdown.NewHandler()

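	// Build a Flynn root filesystem from origin/master on an allocated network;
	// the filesystem is removed when the runner shuts down.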
	bc := r.bc
	bc.Network, err = r.allocateNet()
	if err != nil {
		return err
	}
	if r.rootFS, err = cluster.BuildFlynn(bc, args.RootFS, "origin/master", false, os.Stdout); err != nil {
		return fmt.Errorf("could not build flynn: %s", err)
	}
	r.releaseNet(bc.Network)
	sh.BeforeExit(func() { os.RemoveAll(r.rootFS) })

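	// Builds are persisted in a BoltDB database so pending builds survive restarts.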
	db, err := bolt.Open(args.DBPath, 0600, &bolt.Options{Timeout: 5 * time.Second})
	if err != nil {
		return fmt.Errorf("could not open db: %s", err)
	}
	r.db = db
	sh.BeforeExit(func() { r.db.Close() })

	if err := r.db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists(dbBucket)
		return err
	}); err != nil {
		return fmt.Errorf("could not create builds bucket: %s", err)
	}

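	// buildCh is a counting semaphore that caps concurrent builds at maxBuilds.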
	for i := 0; i < maxBuilds; i++ {
		r.buildCh <- struct{}{}
	}

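	// Kick off any builds that were still pending when the runner last stopped.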
	if err := r.buildPending(); err != nil {
		log.Printf("could not build pending builds: %s", err)
	}

	go r.watchEvents()

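	// Serve the webhook, build, and cluster endpoints plus static assets over TLS.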
	http.HandleFunc("/", r.httpEventHandler)
	http.HandleFunc("/builds", r.httpBuildHandler)
	http.HandleFunc("/cluster/", r.httpClusterHandler)
	http.Handle("/assets/", http.StripPrefix("/assets/", http.FileServer(http.Dir(args.AssetsDir))))
	handler := handlers.CombinedLoggingHandler(os.Stdout, http.DefaultServeMux)
	log.Println("Listening on", args.ListenAddr, "...")
	if err := http.ListenAndServeTLS(args.ListenAddr, args.TLSCert, args.TLSKey, handler); err != nil {
		return fmt.Errorf("ListenAndServeTLS: %s", err)
	}
	return nil
}
Code Example #2
File: host.go  Project: ericcapricorn/flynn
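runDaemon is the entry point of the flynn-host daemon: it parses CLI flags, starts a job backend, restores saved state, boots manifest services such as discoverd, registers the host with the sampi cluster leader, and then runs jobs as they arrive.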
func runDaemon(args *docopt.Args) {
	hostname, _ := os.Hostname()
	externalAddr := args.String["--external"]
	bindAddr := args.String["--bind"]
	configFile := args.String["--config"]
	manifestFile := args.String["--manifest"]
	stateFile := args.String["--state"]
	hostID := args.String["--id"]
	force := args.Bool["--force"]
	volPath := args.String["--volpath"]
	backendName := args.String["--backend"]
	flynnInit := args.String["--flynn-init"]
	metadata := args.All["--meta"].([]string)

	grohl.AddContext("app", "host")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})

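	// Host IDs default to the hostname with dashes stripped; dashes are not allowed.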
	if hostID == "" {
		hostID = strings.Replace(hostname, "-", "", -1)
	}
	if strings.Contains(hostID, "-") {
		log.Fatal("host id must not contain dashes")
	}

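	// Jobs are allocated ports from a fixed high range, tracked separately for TCP and UDP.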
	portAlloc := map[string]*ports.Allocator{
		"tcp": ports.NewAllocator(55000, 65535),
		"udp": ports.NewAllocator(55000, 65535),
	}

	sh := shutdown.NewHandler()
	state := NewState(hostID)
	var backend Backend
	var err error

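	// libvirt-lxc is currently the only supported backend.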
	switch backendName {
	case "libvirt-lxc":
		backend, err = NewLibvirtLXCBackend(state, portAlloc, volPath, "/tmp/flynn-host-logs", flynnInit)
	default:
		log.Fatalf("unknown backend %q", backendName)
	}
	if err != nil {
		sh.Fatal(err)
	}

	if err := serveHTTP(&Host{state: state, backend: backend}, &attachHandler{state: state, backend: backend}, sh); err != nil {
		sh.Fatal(err)
	}

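	// Restore job state persisted by a previous run of the daemon, if any.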
	if stateFile != "" {
		if err := state.Restore(stateFile, backend); err != nil {
			sh.Fatal(err)
		}
	}

	var jobStream cluster.Stream
	sh.BeforeExit(func() {
		if jobStream != nil {
			jobStream.Close()
		}
		backend.Cleanup()
	})

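	// --force cleans up any jobs left behind by a previous daemon before starting new ones.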
	if force {
		if err := backend.Cleanup(); err != nil {
			sh.Fatal(err)
		}
	}

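	// The manifest runner boots the bootstrap services (such as discoverd) described in the manifest.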
	runner := &manifestRunner{
		env:          parseEnviron(),
		externalAddr: externalAddr,
		bindAddr:     bindAddr,
		backend:      backend,
		state:        state,
		ports:        portAlloc,
	}

	discAddr := os.Getenv("DISCOVERD")
	var disc *discoverd.Client
	if manifestFile != "" {
		var r io.Reader
		var f *os.File
		if manifestFile == "-" {
			r = os.Stdin
		} else {
			f, err = os.Open(manifestFile)
			if err != nil {
				sh.Fatal(err)
			}
			r = f
		}
		services, err := runner.runManifest(r)
		if err != nil {
			sh.Fatal(err)
		}
		if f != nil {
			f.Close()
		}

		if d, ok := services["discoverd"]; ok {
			discAddr = fmt.Sprintf("%s:%d", d.ExternalIP, d.TCPPorts[0])
			// connect to the freshly booted discoverd, retrying until it accepts
			// connections (assign the outer disc; redeclaring it here would shadow
			// it and discard the connection)
			err = Attempts.Run(func() (err error) {
				disc, err = discoverd.NewClientWithAddr(discAddr)
				return
			})
			if err != nil {
				sh.Fatal(err)
			}
		}
	}

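	// Fall back to discoverd's default port on the external address.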
	if discAddr == "" && externalAddr != "" {
		discAddr = externalAddr + ":1111"
	}
	// HACK: use env as global for discoverd connection in sampic
	os.Setenv("DISCOVERD", discAddr)
	if disc == nil {
		disc, err = discoverd.NewClientWithAddr(discAddr)
		if err != nil {
			sh.Fatal(err)
		}
	}
	sh.BeforeExit(func() { disc.UnregisterAll() })
	sampiStandby, err := disc.RegisterAndStandby("flynn-host", externalAddr+":1113", map[string]string{"id": hostID})
	if err != nil {
		sh.Fatal(err)
	}

	// Check if we are the leader so that we can use the cluster functions directly
	sampiCluster := sampi.NewCluster(sampi.NewState())
	select {
	case <-sampiStandby:
		g.Log(grohl.Data{"at": "sampi_leader"})
		rpc.Register(sampiCluster)
	case <-time.After(5 * time.Millisecond):
		go func() {
			<-sampiStandby
			g.Log(grohl.Data{"at": "sampi_leader"})
			rpc.Register(sampiCluster)
		}()
	}
	cluster, err := cluster.NewClientWithSelf(hostID, NewLocalClient(hostID, sampiCluster))
	if err != nil {
		sh.Fatal(err)
	}
	sh.BeforeExit(func() { cluster.Close() })

	g.Log(grohl.Data{"at": "sampi_connected"})

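	// Stream local job state changes to the cluster scheduler.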
	events := state.AddListener("all")
	go syncScheduler(cluster, events)

	h := &host.Host{}
	if configFile != "" {
		h, err = openConfig(configFile)
		if err != nil {
			sh.Fatal(err)
		}
	}
	if h.Metadata == nil {
		h.Metadata = make(map[string]string)
	}
	for _, s := range metadata {
		kv := strings.SplitN(s, "=", 2)
		if len(kv) != 2 {
			log.Fatalf("malformed --meta flag %q, expected key=value", s)
		}
		h.Metadata[kv[0]] = kv[1]
	}
	h.ID = hostID

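	// Registration loop: register with the current sampi leader, run jobs as they
	// arrive, and re-register whenever a new leader is elected.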
	for {
		newLeader := cluster.NewLeaderSignal()

		h.Jobs = state.ClusterJobs()
		jobs := make(chan *host.Job)
		jobStream = cluster.RegisterHost(h, jobs)
		g.Log(grohl.Data{"at": "host_registered"})
		for job := range jobs {
			if externalAddr != "" {
				if job.Config.Env == nil {
					job.Config.Env = make(map[string]string)
				}
				job.Config.Env["EXTERNAL_IP"] = externalAddr
				job.Config.Env["DISCOVERD"] = discAddr
			}
			if err := backend.Run(job); err != nil {
				state.SetStatusFailed(job.ID, err)
			}
		}
		g.Log(grohl.Data{"at": "sampi_disconnected", "err": jobStream.Err})

		// if the process is shutting down, just block
		if sh.Active {
			<-make(chan struct{})
		}

		<-newLeader
	}
}