Example No. 1
func main() {
	defer shutdown.Exit()

	apiPort := os.Getenv("PORT")
	if apiPort == "" {
		apiPort = "5000"
	}

	logAddr := flag.String("logaddr", ":3000", "syslog input listen address")
	apiAddr := flag.String("apiaddr", ":"+apiPort, "api listen address")
	flag.Parse()

	a := NewAggregator(*logAddr)
	if err := a.Start(); err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(a.Shutdown)

	listener, err := reuseport.NewReusablePortListener("tcp4", *apiAddr)
	if err != nil {
		shutdown.Fatal(err)
	}

	hb, err := discoverd.AddServiceAndRegister("flynn-logaggregator", *logAddr)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	shutdown.Fatal(http.Serve(listener, apiHandler(a)))
}
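The pattern in Example No. 1 recurs throughout this listing: defer shutdown.Exit() so registered hooks run when main returns, register cleanup callbacks with shutdown.BeforeExit, and report unrecoverable errors with shutdown.Fatal. Below is a minimal sketch of that skeleton, assuming the flynn/pkg/shutdown and discoverd client packages used in these examples; "example-service" and the PORT-based address are placeholders, not names from the original code.

package main

import (
	"net/http"
	"os"

	discoverd "github.com/flynn/flynn/discoverd/client"
	"github.com/flynn/flynn/pkg/shutdown"
)

func main() {
	// Run any registered BeforeExit hooks when main returns.
	defer shutdown.Exit()

	addr := ":" + os.Getenv("PORT")

	// Register with service discovery and deregister on shutdown.
	// "example-service" is a placeholder service name.
	hb, err := discoverd.AddServiceAndRegister("example-service", addr)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	// Fatal logs the error and exits via the shutdown package.
	shutdown.Fatal(http.ListenAndServe(addr, nil))
}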
Example No. 2
func main() {
	serviceName := os.Getenv("FLYNN_POSTGRES")
	if serviceName == "" {
		serviceName = "postgres"
	}
	singleton := os.Getenv("SINGLETON") == "true"
	password := os.Getenv("PGPASSWORD")

	const dataDir = "/data"
	idFile := filepath.Join(dataDir, "instance_id")
	idBytes, err := ioutil.ReadFile(idFile)
	if err != nil && !os.IsNotExist(err) {
		shutdown.Fatalf("error reading instance ID: %s", err)
	}
	id := string(idBytes)
	if len(id) == 0 {
		id = random.UUID()
		if err := ioutil.WriteFile(idFile, []byte(id), 0644); err != nil {
			shutdown.Fatalf("error writing instance ID: %s", err)
		}
	}

	err = discoverd.DefaultClient.AddService(serviceName, &discoverd.ServiceConfig{
		LeaderType: discoverd.LeaderTypeManual,
	})
	if err != nil && !httphelper.IsObjectExistsError(err) {
		shutdown.Fatal(err)
	}
	inst := &discoverd.Instance{
		Addr: ":5432",
		Meta: map[string]string{pgIdKey: id},
	}
	hb, err := discoverd.DefaultClient.RegisterInstance(serviceName, inst)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	log := log15.New("app", "postgres")

	pg := NewPostgres(Config{
		ID:           id,
		Singleton:    singleton,
		DataDir:      filepath.Join(dataDir, "db"),
		BinDir:       "/usr/lib/postgresql/9.5/bin/",
		Password:     password,
		Logger:       log.New("component", "postgres"),
		ExtWhitelist: true,
		WaitUpstream: true,
		SHMType:      "posix",
	})
	dd := sd.NewDiscoverd(discoverd.DefaultClient.Service(serviceName), log.New("component", "discoverd"))

	peer := state.NewPeer(inst, id, pgIdKey, singleton, dd, pg, log.New("component", "peer"))
	shutdown.BeforeExit(func() { peer.Close() })

	go peer.Run()
	shutdown.Fatal(ServeHTTP(pg.(*Postgres), peer, hb, log.New("component", "http")))
	// TODO(titanous): clean shutdown of postgres
}
Example No. 3
func main() {
	log := logger.New("fn", "main")

	log.Info("creating controller client")
	client, err := controller.NewClient("", os.Getenv("AUTH_KEY"))
	if err != nil {
		log.Error("error creating controller client", "err", err)
		shutdown.Fatal(err)
	}

	log.Info("connecting to postgres")
	db := postgres.Wait(nil, schema.PrepareStatements)

	shutdown.BeforeExit(func() { db.Close() })

	go func() {
		status.AddHandler(func() status.Status {
			_, err := db.ConnPool.Exec("ping")
			if err != nil {
				return status.Unhealthy
			}
			return status.Healthy
		})
		addr := ":" + os.Getenv("PORT")
		hb, err := discoverd.AddServiceAndRegister("controller-worker", addr)
		if err != nil {
			shutdown.Fatal(err)
		}
		shutdown.BeforeExit(func() { hb.Close() })
		shutdown.Fatal(http.ListenAndServe(addr, nil))
	}()

	workers := que.NewWorkerPool(
		que.NewClient(db.ConnPool),
		que.WorkMap{
			"deployment":             deployment.JobHandler(db, client, logger),
			"app_deletion":           app_deletion.JobHandler(db, client, logger),
			"domain_migration":       domain_migration.JobHandler(db, client, logger),
			"release_cleanup":        release_cleanup.JobHandler(db, client, logger),
			"app_garbage_collection": app_garbage_collection.JobHandler(db, client, logger),
		},
		workerCount,
	)
	workers.Interval = 5 * time.Second

	log.Info("starting workers", "count", workerCount, "interval", workers.Interval)
	workers.Start()
	shutdown.BeforeExit(func() { workers.Shutdown() })

	select {} // block and keep running
}
Example No. 4
func main() {
	defer shutdown.Exit()

	dsn := &mariadb.DSN{
		Host:     serviceHost + ":3306",
		User:     "******",
		Password: os.Getenv("MYSQL_PWD"),
		Database: "mysql",
	}
	db, err := sql.Open("mysql", dsn.String())
	if err != nil {
		shutdown.Fatal(err)
	}
	api := &API{db}

	router := httprouter.New()
	router.POST("/databases", httphelper.WrapHandler(api.createDatabase))
	router.DELETE("/databases", httphelper.WrapHandler(api.dropDatabase))
	router.GET("/ping", httphelper.WrapHandler(api.ping))

	port := os.Getenv("PORT")
	if port == "" {
		port = "3000"
	}
	addr := ":" + port

	hb, err := discoverd.AddServiceAndRegister(serviceName+"-api", addr)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	handler := httphelper.ContextInjector(serviceName+"-api", httphelper.NewRequestLogger(router))
	shutdown.Fatal(http.ListenAndServe(addr, handler))
}
Example No. 5
func main() {
	defer shutdown.Exit()

	api := &API{}

	router := httprouter.New()
	router.POST("/databases", api.createDatabase)
	router.DELETE("/databases", api.dropDatabase)
	router.GET("/ping", api.ping)

	port := os.Getenv("PORT")
	if port == "" {
		port = "3000"
	}
	addr := ":" + port

	hb, err := discoverd.AddServiceAndRegister(serviceName+"-api", addr)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	handler := httphelper.ContextInjector(serviceName+"-api", httphelper.NewRequestLogger(router))
	shutdown.Fatal(http.ListenAndServe(addr, handler))
}
Example No. 6
File: main.go Project: devick/flynn
/*
	ish: the Inexcusable/Insecure/Internet SHell.
*/
func main() {
	defer shutdown.Exit()

	name := os.Getenv("NAME")
	port := os.Getenv("PORT")
	addr := ":" + port
	if name == "" {
		name = "ish-service"
	}

	l, err := net.Listen("tcp", addr)
	if err != nil {
		shutdown.Fatal(err)
	}
	defer l.Close()

	hb, err := discoverd.AddServiceAndRegister(name, addr)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	http.HandleFunc("/ish", ish)
	if err := http.Serve(l, nil); err != nil {
		shutdown.Fatal(err)
	}
}
Example No. 7
func main() {
	defer shutdown.Exit()

	db := postgres.Wait(&postgres.Conf{
		Service:  serviceName,
		User:     "******",
		Password: os.Getenv("PGPASSWORD"),
		Database: "postgres",
	}, nil)
	api := &pgAPI{db}

	router := httprouter.New()
	router.POST("/databases", httphelper.WrapHandler(api.createDatabase))
	router.DELETE("/databases", httphelper.WrapHandler(api.dropDatabase))
	router.GET("/ping", httphelper.WrapHandler(api.ping))

	port := os.Getenv("PORT")
	if port == "" {
		port = "3000"
	}
	addr := ":" + port

	hb, err := discoverd.AddServiceAndRegister(serviceName+"-api", addr)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	handler := httphelper.ContextInjector(serviceName+"-api", httphelper.NewRequestLogger(router))
	shutdown.Fatal(http.ListenAndServe(addr, handler))
}
Example No. 8
func (d *discoverdWrapper) Register() (bool, error) {
	log := logger.New("fn", "discoverd.Register")

	log.Info("registering with service discovery")
	hb, err := discoverd.AddServiceAndRegister(serviceName, d.addr)
	if err != nil {
		log.Error("error registering with service discovery", "err", err)
		return false, err
	}
	shutdown.BeforeExit(func() { hb.Close() })

	selfAddr := hb.Addr()
	log = log.New("self.addr", selfAddr)

	service := discoverd.NewService(serviceName)
	var leaders chan *discoverd.Instance
	var stream stream.Stream
	connect := func() (err error) {
		log.Info("connecting service leader stream")
		leaders = make(chan *discoverd.Instance)
		stream, err = service.Leaders(leaders)
		if err != nil {
			log.Error("error connecting service leader stream", "err", err)
		}
		return
	}
	if err := connect(); err != nil {
		return false, err
	}

	go func() {
	outer:
		for {
			for leader := range leaders {
				if leader == nil {
					// a nil leader indicates there are no instances for
					// the service, ignore and wait for an actual leader
					log.Warn("received nil leader event")
					continue
				}
				log.Info("received leader event", "leader.addr", leader.Addr)
				d.leader <- leader.Addr == selfAddr
			}
			log.Warn("service leader stream disconnected", "err", stream.Err())
			for {
				if err := connect(); err == nil {
					continue outer
				}
				time.Sleep(100 * time.Millisecond)
			}
		}
	}()

	select {
	case isLeader := <-d.leader:
		return isLeader, nil
	case <-time.After(30 * time.Second):
		return false, errors.New("timed out waiting for current service leader")
	}
}
Example No. 9
// Run executes the program.
func (m *Main) Run() error {
	m.Logger.Info("running")

	// Read or generate the instance identifier from file.
	id, err := m.readID(filepath.Join(m.DataDir, "instance_id"))
	if err != nil {
		return err
	}
	m.Process.ID = id
	m.Process.DataDir = m.DataDir
	m.Process.Logger = m.Logger.New("component", "process", "id", id)

	// Start process.
	m.Logger.Info("starting process", "id", id)
	if err := m.Process.Start(); err != nil {
		m.Logger.Error("error starting process", "err", err)
		return err
	}

	// Add service to discoverd registry.
	m.Logger.Info("adding service", "name", m.ServiceName)
	if err = m.DiscoverdClient.AddService(m.ServiceName, nil); err != nil && !httphelper.IsObjectExistsError(err) {
		m.Logger.Error("error adding discoverd service", "err", err)
		return err
	}
	inst := &discoverd.Instance{
		Addr: ":" + m.Process.Port,
		Meta: map[string]string{"REDIS_ID": id},
	}

	// Register instance and retain heartbeater.
	m.Logger.Info("registering instance", "addr", inst.Addr, "meta", inst.Meta)
	hb, err := m.DiscoverdClient.RegisterInstance(m.ServiceName, inst)
	if err != nil {
		m.Logger.Error("error registering discoverd instance", "err", err)
		return err
	}
	m.hb = hb
	shutdown.BeforeExit(func() { hb.Close() })

	m.Logger.Info("opening port", "addr", m.Addr)

	// Open HTTP port.
	ln, err := net.Listen("tcp", m.Addr)
	if err != nil {
		m.Logger.Error("error opening port", "err", err)
		return err
	}
	m.ln = ln

	// Initialize and serve the HTTP handler.
	m.Logger.Info("serving http api")
	h := redis.NewHandler()
	h.Process = m.Process
	h.Heartbeater = m.hb
	h.Logger = m.Logger.New("component", "http")
	go func() { http.Serve(ln, h) }()

	return nil
}
Example No. 10
func runServer(_ *docopt.Args) error {
	addr := ":" + os.Getenv("PORT")

	db := postgres.Wait(nil, nil)
	if err := dbMigrations.Migrate(db); err != nil {
		return fmt.Errorf("error running DB migrations: %s", err)
	}

	mux := http.NewServeMux()

	repo, err := data.NewFileRepoFromEnv(db)
	if err != nil {
		return err
	}

	hb, err := discoverd.AddServiceAndRegister("blobstore", addr)
	if err != nil {
		return err
	}
	shutdown.BeforeExit(func() { hb.Close() })

	log.Println("Blobstore serving files on " + addr)

	mux.Handle("/", handler(repo))
	mux.Handle(status.Path, status.Handler(func() status.Status {
		if err := db.Exec("SELECT 1"); err != nil {
			return status.Unhealthy
		}
		return status.Healthy
	}))

	h := httphelper.ContextInjector("blobstore", httphelper.NewRequestLogger(mux))
	return http.ListenAndServe(addr, h)
}
Example No. 11
func main() {
	log := logger.New("fn", "main")

	log.Info("creating controller client")
	client, err := controller.NewClient("", os.Getenv("AUTH_KEY"))
	if err != nil {
		log.Error("error creating controller client", "err", err)
		shutdown.Fatal()
	}

	log.Info("connecting to postgres")
	db := postgres.Wait("", "")

	log.Info("creating postgres connection pool")
	pgxpool, err := pgx.NewConnPool(pgx.ConnPoolConfig{
		ConnConfig: pgx.ConnConfig{
			Host:     os.Getenv("PGHOST"),
			User:     os.Getenv("PGUSER"),
			Password: os.Getenv("PGPASSWORD"),
			Database: os.Getenv("PGDATABASE"),
		},
		AfterConnect:   que.PrepareStatements,
		MaxConnections: workerCount,
	})
	if err != nil {
		log.Error("error creating postgres connection pool", "err", err)
		shutdown.Fatal()
	}
	shutdown.BeforeExit(func() { pgxpool.Close() })

	workers := que.NewWorkerPool(
		que.NewClient(pgxpool),
		que.WorkMap{
			"deployment":   deployment.JobHandler(db, client, logger),
			"app_deletion": app_deletion.JobHandler(db, client, logger),
		},
		workerCount,
	)
	workers.Interval = 5 * time.Second

	log.Info("starting workers", "count", workerCount, "interval", workers.Interval)
	workers.Start()
	shutdown.BeforeExit(func() { workers.Shutdown() })

	select {} // block and keep running
}
Example No. 12
File: main.go Project: ozum/flynn
func main() {
	defer shutdown.Exit()

	apiPort := os.Getenv("PORT")
	if apiPort == "" {
		apiPort = "5000"
	}

	logAddr := flag.String("logaddr", ":3000", "syslog input listen address")
	apiAddr := flag.String("apiaddr", ":"+apiPort, "api listen address")
	snapshotPath := flag.String("snapshot", "", "snapshot path")
	flag.Parse()

	a := NewAggregator(*logAddr)

	if *snapshotPath != "" {
		if err := a.ReplaySnapshot(*snapshotPath); err != nil {
			shutdown.Fatal(err)
		}

		shutdown.BeforeExit(func() {
			if err := a.TakeSnapshot(*snapshotPath); err != nil {
				log15.Error("snapshot error", "err", err)
			}
		})
	}

	if err := a.Start(); err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(a.Shutdown)

	listener, err := net.Listen("tcp4", *apiAddr)
	if err != nil {
		shutdown.Fatal(err)
	}

	hb, err := discoverd.AddServiceAndRegister("flynn-logaggregator", *logAddr)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	shutdown.Fatal(http.Serve(listener, apiHandler(a)))
}
Example No. 13
func main() {
	serviceName := os.Getenv("FLYNN_POSTGRES")
	if serviceName == "" {
		serviceName = "postgres"
	}
	singleton := os.Getenv("SINGLETON") == "true"
	password := os.Getenv("PGPASSWORD")

	err := discoverd.DefaultClient.AddService(serviceName, &discoverd.ServiceConfig{
		LeaderType: discoverd.LeaderTypeManual,
	})
	if err != nil && !httphelper.IsObjectExistsError(err) {
		shutdown.Fatal(err)
	}
	inst := &discoverd.Instance{Addr: ":5432"}
	hb, err := discoverd.DefaultClient.RegisterInstance(serviceName, inst)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	log := log15.New("app", "postgres")

	pg := NewPostgres(Config{
		ID:           inst.ID,
		Singleton:    singleton,
		BinDir:       "/usr/lib/postgresql/9.4/bin/",
		Password:     password,
		Logger:       log.New("component", "postgres"),
		ExtWhitelist: true,
		WaitUpstream: true,
		// TODO(titanous) investigate this:
		SHMType: "sysv", // the default on 9.4, 'posix' is not currently supported in our containers
	})
	dd := NewDiscoverd(discoverd.DefaultClient.Service(serviceName), log.New("component", "discoverd"))

	peer := state.NewPeer(inst, singleton, dd, pg, log.New("component", "peer"))
	shutdown.BeforeExit(func() { peer.Close() })

	go peer.Run()
	shutdown.Fatal(ServeHTTP(pg.(*Postgres), peer, hb, log.New("component", "http")))
	// TODO(titanous): clean shutdown of postgres
}
Example No. 14
func main() {
	defer shutdown.Exit()

	grohl.AddContext("app", "controller-scheduler")
	grohl.Log(grohl.Data{"at": "start"})

	go startHTTPServer()

	if period := os.Getenv("BACKOFF_PERIOD"); period != "" {
		var err error
		backoffPeriod, err = time.ParseDuration(period)
		if err != nil {
			shutdown.Fatal(err)
		}
		grohl.Log(grohl.Data{"at": "backoff_period", "period": backoffPeriod.String()})
	}

	cc, err := controller.NewClient("", os.Getenv("AUTH_KEY"))
	if err != nil {
		shutdown.Fatal(err)
	}
	c := newContext(cc, cluster.NewClient())

	c.watchHosts()

	grohl.Log(grohl.Data{"at": "leaderwait"})
	hb, err := discoverd.AddServiceAndRegister("controller-scheduler", ":"+os.Getenv("PORT"))
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	leaders := make(chan *discoverd.Instance)
	stream, err := discoverd.NewService("controller-scheduler").Leaders(leaders)
	if err != nil {
		shutdown.Fatal(err)
	}
	for leader := range leaders {
		if leader.Addr == hb.Addr() {
			break
		}
	}
	if err := stream.Err(); err != nil {
		// TODO: handle discoverd errors
		shutdown.Fatal(err)
	}
	stream.Close()
	// TODO: handle demotion

	grohl.Log(grohl.Data{"at": "leader"})

	// TODO: periodic full cluster sync for anti-entropy
	c.watchFormations()
}
Example No. 15
func NewDiscoverdManager(backend Backend, mux *logmux.LogMux, hostID, publishAddr string) *DiscoverdManager {
	d := &DiscoverdManager{
		backend: backend,
		mux:     mux,
		inst: &discoverd.Instance{
			Addr: publishAddr,
			Meta: map[string]string{"id": hostID},
		},
	}
	shutdown.BeforeExit(d.Close)
	return d
}
Example No. 16
func main() {
	defer shutdown.Exit()

	// Initialize main program and execute.
	m := NewMain()
	shutdown.BeforeExit(func() { fmt.Fprintln(m.Stderr, "discoverd is exiting") })
	if err := m.Run(os.Args[1:]...); err != nil {
		fmt.Fprintln(m.Stderr, err.Error())
		os.Exit(1)
	}

	// Wait indefinitely.
	<-(chan struct{})(nil)
}
Example No. 17
func main() {
	defer shutdown.Exit()

	apiPort := os.Getenv("PORT")
	if apiPort == "" {
		apiPort = "5000"
	}

	logAddr := flag.String("logaddr", ":3000", "syslog input listen address")
	apiAddr := flag.String("apiaddr", ":"+apiPort, "api listen address")
	flag.Parse()

	conf := ServerConfig{
		SyslogAddr:  *logAddr,
		ApiAddr:     *apiAddr,
		Discoverd:   discoverd.DefaultClient,
		ServiceName: "logaggregator",
	}

	srv := NewServer(conf)
	shutdown.BeforeExit(srv.Shutdown)

	// get leader for snapshot (if any)
	leader, err := conf.Discoverd.Service(conf.ServiceName).Leader()
	if err == nil {
		host, _, _ := net.SplitHostPort(leader.Addr)
		log15.Info("loading snapshot from leader", "leader", host)

		c, _ := client.New("http://" + host)
		snapshot, err := c.GetSnapshot()
		if err == nil {
			if err := srv.LoadSnapshot(snapshot); err != nil {
				log15.Error("error receiving snapshot from leader", "error", err)
			}
			snapshot.Close()
		} else {
			log15.Error("error getting snapshot from leader", "error", err)
		}
	} else {
		log15.Info("error finding leader for snapshot", "error", err)
	}

	if err := srv.Start(); err != nil {
		shutdown.Fatal(err)
	}
	<-make(chan struct{})
}
Example No. 18
func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = "3000"
	}
	addr := ":" + port

	if seed := os.Getenv("NAME_SEED"); seed != "" {
		s, err := hex.DecodeString(seed)
		if err != nil {
			log.Fatalln("error decoding NAME_SEED:", err)
		}
		name.SetSeed(s)
	}

	db, err := postgres.Open("", "")
	if err != nil {
		log.Fatal(err)
	}

	if err := migrateDB(db.DB); err != nil {
		log.Fatal(err)
	}

	cc, err := cluster.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	sc, err := routerc.New()
	if err != nil {
		log.Fatal(err)
	}

	if err := discoverd.Register("flynn-controller", addr); err != nil {
		log.Fatal(err)
	}

	shutdown.BeforeExit(func() {
		discoverd.Unregister("flynn-controller", addr)
	})

	handler, _ := appHandler(handlerConfig{db: db, cc: cc, sc: sc, dc: discoverd.DefaultClient, key: os.Getenv("AUTH_KEY")})
	log.Fatal(http.ListenAndServe(addr, handler))
}
Example No. 19
func main() {
	defer shutdown.Exit()

	flag.Parse()

	addr := os.Getenv("PORT")
	if addr == "" {
		addr = *listenPort
	}
	addr = ":" + addr

	var fs Filesystem
	var storageDesc string

	if *storageDir != "" {
		fs = NewOSFilesystem(*storageDir)
		storageDesc = *storageDir
	} else {
		db, err := postgres.Open("", "")
		if err != nil {
			shutdown.Fatal(err)
		}
		fs, err = NewPostgresFilesystem(db.DB)
		if err != nil {
			shutdown.Fatal(err)
		}
		storageDesc = "Postgres"
	}

	if *serviceDiscovery {
		hb, err := discoverd.AddServiceAndRegister("blobstore", addr)
		if err != nil {
			shutdown.Fatal(err)
		}
		shutdown.BeforeExit(func() { hb.Close() })
	}

	log.Println("Blobstore serving files on " + addr + " from " + storageDesc)

	http.Handle("/", handler(fs))
	status.AddHandler(fs.Status)

	shutdown.Fatal(http.ListenAndServe(addr, nil))
}
Example No. 20
func main() {
	defer shutdown.Exit()

	flag.Parse()

	addr := os.Getenv("PORT")
	if addr == "" {
		addr = *listenPort
	}
	addr = ":" + addr

	var fs Filesystem
	var storageDesc string

	if *storageDir != "" {
		fs = NewOSFilesystem(*storageDir)
		storageDesc = *storageDir
	} else {
		var err error
		db := postgres.Wait(nil, nil)
		fs, err = NewPostgresFilesystem(db)
		if err != nil {
			shutdown.Fatal(err)
		}
		storageDesc = "Postgres"
	}

	if *serviceDiscovery {
		hb, err := discoverd.AddServiceAndRegister("blobstore", addr)
		if err != nil {
			shutdown.Fatal(err)
		}
		shutdown.BeforeExit(func() { hb.Close() })
	}

	log.Println("Blobstore serving files on " + addr + " from " + storageDesc)

	mux := http.NewServeMux()
	mux.Handle("/", handler(fs))
	mux.Handle(status.Path, status.Handler(fs.Status))

	h := httphelper.ContextInjector("blobstore", httphelper.NewRequestLogger(mux))
	shutdown.Fatal(http.ListenAndServe(addr, h))
}
Example No. 21
func NewLibcontainerBackend(config *LibcontainerConfig) (Backend, error) {
	factory, err := libcontainer.New(
		containerRoot,
		libcontainer.Cgroupfs,
		libcontainer.InitArgs(os.Args[0], "libcontainer-init"),
	)
	if err != nil {
		return nil, err
	}

	if err := setupCGroups(config.PartitionCGroups); err != nil {
		return nil, err
	}

	defaultTmpfs, err := createTmpfs(resource.DefaultTempDiskSize)
	if err != nil {
		return nil, err
	}
	shutdown.BeforeExit(func() { defaultTmpfs.Delete() })

	l := &LibcontainerBackend{
		LibcontainerConfig:  config,
		factory:             factory,
		logStreams:          make(map[string]map[string]*logmux.LogStream),
		containers:          make(map[string]*Container),
		defaultEnv:          make(map[string]string),
		resolvConf:          "/etc/resolv.conf",
		ipalloc:             ipallocator.New(),
		discoverdConfigured: make(chan struct{}),
		networkConfigured:   make(chan struct{}),
		globalState:         &libcontainerGlobalState{},
		defaultTmpfs:        defaultTmpfs,
	}
	l.httpClient = &http.Client{Transport: &http.Transport{
		Dial: dialer.RetryDial(l.discoverdDial),
	}}
	return l, nil
}
Example No. 22
// Run executes the program.
func (m *Main) Run() error {
	// Open HTTP port.
	ln, err := net.Listen("tcp", m.Addr)
	if err != nil {
		return err
	}
	m.ln = ln

	// Initialize handler logger.
	m.Handler.Logger = m.Logger.New("component", "http")

	// Register service with discoverd.
	hb, err := discoverd.AddServiceAndRegister(m.ServiceName+"-api", m.Addr)
	if err != nil {
		return err
	}
	shutdown.BeforeExit(func() { hb.Close() })

	h := httphelper.ContextInjector(m.ServiceName+"-api", httphelper.NewRequestLogger(m.Handler))
	go func() { http.Serve(ln, h) }()

	return nil
}
Example No. 23
func serveHTTP(h *Host, addr string, attach *attachHandler, clus *cluster.Client, vman *volumemanager.Manager, connectDiscoverd func(string) error) error {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}
	shutdown.BeforeExit(func() { l.Close() })

	r := httprouter.New()

	r.POST("/attach", attach.ServeHTTP)

	jobAPI := &jobAPI{
		host:             h,
		connectDiscoverd: connectDiscoverd,
	}
	jobAPI.RegisterRoutes(r)
	volAPI := volumeapi.NewHTTPAPI(clus, vman)
	volAPI.RegisterRoutes(r)

	go http.Serve(l, httphelper.ContextInjector("host", httphelper.NewRequestLogger(r)))

	return nil
}
Example No. 24
func (r *Runner) start() error {
	r.authKey = os.Getenv("AUTH_KEY")
	if r.authKey == "" {
		return errors.New("AUTH_KEY not set")
	}

	r.githubToken = os.Getenv("GITHUB_TOKEN")
	if r.githubToken == "" {
		return errors.New("GITHUB_TOKEN not set")
	}

	awsAuth, err := aws.EnvAuth()
	if err != nil {
		return err
	}
	r.s3Bucket = s3.New(awsAuth, aws.USEast).Bucket(logBucket)

	_, listenPort, err = net.SplitHostPort(args.ListenAddr)
	if err != nil {
		return err
	}

	bc := r.bc
	bc.Network = r.allocateNet()
	if r.rootFS, err = cluster.BuildFlynn(bc, args.RootFS, "origin/master", false, os.Stdout); err != nil {
		return fmt.Errorf("could not build flynn: %s", err)
	}
	r.releaseNet(bc.Network)
	shutdown.BeforeExit(func() { removeRootFS(r.rootFS) })

	db, err := bolt.Open(args.DBPath, 0600, &bolt.Options{Timeout: 5 * time.Second})
	if err != nil {
		return fmt.Errorf("could not open db: %s", err)
	}
	r.db = db
	shutdown.BeforeExit(func() { r.db.Close() })

	if err := r.db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists(dbBucket)
		return err
	}); err != nil {
		return fmt.Errorf("could not create builds bucket: %s", err)
	}

	for i := 0; i < maxConcurrentBuilds; i++ {
		r.buildCh <- struct{}{}
	}

	if err := r.buildPending(); err != nil {
		log.Printf("could not build pending builds: %s", err)
	}

	go r.connectIRC()
	go r.watchEvents()

	router := httprouter.New()
	router.RedirectTrailingSlash = true
	router.Handler("GET", "/", http.RedirectHandler("/builds", 302))
	router.POST("/", r.handleEvent)
	router.GET("/builds/:build", r.getBuildLog)
	router.POST("/builds/:build/restart", r.restartBuild)
	router.POST("/builds/:build/explain", r.explainBuild)
	router.GET("/builds", r.getBuilds)
	router.ServeFiles("/assets/*filepath", http.Dir(args.AssetsDir))
	router.GET("/cluster/:cluster", r.clusterAPI(r.getCluster))
	router.POST("/cluster/:cluster", r.clusterAPI(r.addHost))
	router.POST("/cluster/:cluster/release", r.clusterAPI(r.addReleaseHosts))
	router.DELETE("/cluster/:cluster/:host", r.clusterAPI(r.removeHost))

	srv := &http.Server{
		Addr:      args.ListenAddr,
		Handler:   router,
		TLSConfig: tlsconfig.SecureCiphers(nil),
	}
	log.Println("Listening on", args.ListenAddr, "...")
	if err := srv.ListenAndServeTLS(args.TLSCert, args.TLSKey); err != nil {
		return fmt.Errorf("ListenAndServeTLS: %s", err)
	}

	return nil
}
Example No. 25
func main() {
	defer shutdown.Exit()

	var cookieKey *[32]byte
	if key := os.Getenv("COOKIE_KEY"); key != "" {
		res, err := base64.StdEncoding.DecodeString(key)
		if err != nil {
			shutdown.Fatalf("error decoding COOKIE_KEY: %s", err)
		}
		if len(res) != 32 {
			shutdown.Fatalf("decoded %d bytes from COOKIE_KEY, expected 32", len(res))
		}
		var k [32]byte
		copy(k[:], res)
		cookieKey = &k
	}
	if cookieKey == nil {
		shutdown.Fatal("Missing random 32 byte base64-encoded COOKIE_KEY")
	}

	httpPort := flag.String("http-port", "8080", "http listen port")
	httpsPort := flag.String("https-port", "4433", "https listen port")
	tcpIP := flag.String("tcp-ip", os.Getenv("LISTEN_IP"), "tcp router listen ip")
	tcpRangeStart := flag.Int("tcp-range-start", 3000, "tcp port range start")
	tcpRangeEnd := flag.Int("tcp-range-end", 3500, "tcp port range end")
	certFile := flag.String("tls-cert", "", "TLS (SSL) cert file in pem format")
	keyFile := flag.String("tls-key", "", "TLS (SSL) key file in pem format")
	apiPort := flag.String("api-port", "", "api listen port")
	flag.Parse()

	if *apiPort == "" {
		*apiPort = os.Getenv("PORT")
		if *apiPort == "" {
			*apiPort = "5000"
		}
	}

	keypair := tls.Certificate{}
	var err error
	if *certFile != "" {
		if keypair, err = tls.LoadX509KeyPair(*certFile, *keyFile); err != nil {
			shutdown.Fatal(err)
		}
	} else if tlsCert := os.Getenv("TLSCERT"); tlsCert != "" {
		if tlsKey := os.Getenv("TLSKEY"); tlsKey != "" {
			os.Setenv("TLSKEY", fmt.Sprintf("md5^(%s)", md5sum(tlsKey)))
			if keypair, err = tls.X509KeyPair([]byte(tlsCert), []byte(tlsKey)); err != nil {
				shutdown.Fatal(err)
			}
		}
	}

	log := logger.New("fn", "main")

	log.Info("connecting to postgres")
	db, err := postgres.Open("", "")
	if err != nil {
		log.Error("error connecting to postgres", "err", err)
		shutdown.Fatal(err)
	}
	log.Info("running DB migrations")
	if err := migrateDB(db.DB); err != nil {
		log.Error("error running DB migrations", "err", err)
		shutdown.Fatal(err)
	}

	var pgport int
	if port := os.Getenv("PGPORT"); port != "" {
		var err error
		if pgport, err = strconv.Atoi(port); err != nil {
			shutdown.Fatal(err)
		}
	}

	log.Info("creating postgres connection pool")
	pgxpool, err := pgx.NewConnPool(pgx.ConnPoolConfig{
		ConnConfig: pgx.ConnConfig{
			Host:     os.Getenv("PGHOST"),
			Port:     uint16(pgport),
			Database: os.Getenv("PGDATABASE"),
			User:     os.Getenv("PGUSER"),
			Password: os.Getenv("PGPASSWORD"),
		},
	})
	if err != nil {
		log.Error("error creating postgres connection pool", "err", err)
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { pgxpool.Close() })

	httpAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *httpPort)
	httpsAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *httpsPort)
	r := Router{
		TCP: &TCPListener{
			IP:        *tcpIP,
			startPort: *tcpRangeStart,
			endPort:   *tcpRangeEnd,
			ds:        NewPostgresDataStore("tcp", pgxpool),
			discoverd: discoverd.DefaultClient,
		},
		HTTP: &HTTPListener{
			Addr:      httpAddr,
			TLSAddr:   httpsAddr,
			cookieKey: cookieKey,
			keypair:   keypair,
			ds:        NewPostgresDataStore("http", pgxpool),
			discoverd: discoverd.DefaultClient,
		},
	}

	if err := r.Start(); err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(r.Close)

	apiAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *apiPort)
	log.Info("starting API listener")
	listener, err := listenFunc("tcp4", apiAddr)
	if err != nil {
		log.Error("error starting API listener", "err", err)
		shutdown.Fatal(listenErr{apiAddr, err})
	}

	services := map[string]string{
		"router-api":  apiAddr,
		"router-http": httpAddr,
	}
	for service, addr := range services {
		log.Info("registering service", "name", service, "addr", addr)
		hb, err := discoverd.AddServiceAndRegister(service, addr)
		if err != nil {
			log.Error("error registering service", "name", service, "addr", addr, "err", err)
			shutdown.Fatal(err)
		}
		shutdown.BeforeExit(func() { hb.Close() })
	}

	log.Info("serving API requests")
	shutdown.Fatal(http.Serve(listener, apiHandler(&r)))
}
Example No. 26
func runDaemon(args *docopt.Args) {
	hostname, _ := os.Hostname()
	externalIP := args.String["--external-ip"]
	stateFile := args.String["--state"]
	hostID := args.String["--id"]
	force := args.Bool["--force"]
	volPath := args.String["--volpath"]
	backendName := args.String["--backend"]
	flynnInit := args.String["--flynn-init"]
	nsumount := args.String["--nsumount"]
	logDir := args.String["--log-dir"]
	discoveryToken := args.String["--discovery"]

	var peerIPs []string
	if args.String["--peer-ips"] != "" {
		peerIPs = strings.Split(args.String["--peer-ips"], ",")
	}

	grohl.AddContext("app", "host")
	grohl.Log(grohl.Data{"at": "start"})
	g := grohl.NewContext(grohl.Data{"fn": "main"})

	if hostID == "" {
		hostID = strings.Replace(hostname, "-", "", -1)
	}
	if strings.Contains(hostID, "-") {
		shutdown.Fatal("host id must not contain dashes")
	}
	if externalIP == "" {
		var err error
		externalIP, err = config.DefaultExternalIP()
		if err != nil {
			shutdown.Fatal(err)
		}
	}

	publishAddr := net.JoinHostPort(externalIP, "1113")
	if discoveryToken != "" {
		// TODO: retry
		discoveryID, err := discovery.RegisterInstance(discovery.Info{
			ClusterURL:  discoveryToken,
			InstanceURL: "http://" + publishAddr,
			Name:        hostID,
		})
		if err != nil {
			g.Log(grohl.Data{"at": "register_discovery", "status": "error", "err": err.Error()})
			shutdown.Fatal(err)
		}
		g.Log(grohl.Data{"at": "register_discovery", "id": discoveryID})
	}

	state := NewState(hostID, stateFile)
	var backend Backend
	var err error

	// create volume manager
	vman, err := volumemanager.New(
		filepath.Join(volPath, "volumes.bolt"),
		func() (volume.Provider, error) {
			// use a zpool backing file size of either 70% of the device on which
			// volumes will reside, or 100GB if that can't be determined.
			var size int64
			var dev syscall.Statfs_t
			if err := syscall.Statfs(volPath, &dev); err == nil {
				size = (dev.Bsize * int64(dev.Blocks) * 7) / 10
			} else {
				size = 100000000000
			}
			g.Log(grohl.Data{"at": "zpool_size", "size": size})

			return zfsVolume.NewProvider(&zfsVolume.ProviderConfig{
				DatasetName: "flynn-default",
				Make: &zfsVolume.MakeDev{
					BackingFilename: filepath.Join(volPath, "zfs/vdev/flynn-default-zpool.vdev"),
					Size:            size,
				},
				WorkingDir: filepath.Join(volPath, "zfs"),
			})
		},
	)
	if err != nil {
		shutdown.Fatal(err)
	}

	mux := logmux.New(1000)
	shutdown.BeforeExit(func() { mux.Close() })

	switch backendName {
	case "libvirt-lxc":
		backend, err = NewLibvirtLXCBackend(state, vman, logDir, flynnInit, nsumount, mux)
	default:
		log.Fatalf("unknown backend %q", backendName)
	}
	if err != nil {
		shutdown.Fatal(err)
	}
	backend.SetDefaultEnv("EXTERNAL_IP", externalIP)

	discoverdManager := NewDiscoverdManager(backend, mux, hostID, publishAddr)
	publishURL := "http://" + publishAddr
	host := &Host{
		id:      hostID,
		url:     publishURL,
		state:   state,
		backend: backend,
		status:  &host.HostStatus{ID: hostID, URL: publishURL},
	}

	// stopJobs stops all jobs, leaving discoverd until the end so other
	// jobs can unregister themselves on shutdown.
	stopJobs := func() (err error) {
		var except []string
		host.statusMtx.RLock()
		if host.status.Discoverd != nil && host.status.Discoverd.JobID != "" {
			except = []string{host.status.Discoverd.JobID}
		}
		host.statusMtx.RUnlock()
		if err := backend.Cleanup(except); err != nil {
			return err
		}
		for _, id := range except {
			if e := backend.Stop(id); e != nil {
				err = e
			}
		}
		return
	}

	resurrect, err := state.Restore(backend)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() {
		// close discoverd before stopping jobs so we can unregister first
		discoverdManager.Close()
		stopJobs()
	})
	shutdown.BeforeExit(func() {
		if err := state.MarkForResurrection(); err != nil {
			log.Print("error marking for resurrection", err)
		}
	})

	if err := serveHTTP(
		host,
		&attachHandler{state: state, backend: backend},
		cluster.NewClient(),
		vman,
		discoverdManager.ConnectLocal,
	); err != nil {
		shutdown.Fatal(err)
	}

	if force {
		if err := stopJobs(); err != nil {
			shutdown.Fatal(err)
		}
	}

	if discoveryToken != "" {
		instances, err := discovery.GetCluster(discoveryToken)
		if err != nil {
			// TODO(titanous): retry?
			shutdown.Fatal(err)
		}
		peerIPs = make([]string, 0, len(instances))
		for _, inst := range instances {
			u, err := url.Parse(inst.URL)
			if err != nil {
				continue
			}
			ip, _, err := net.SplitHostPort(u.Host)
			if err != nil || ip == externalIP {
				continue
			}
			peerIPs = append(peerIPs, ip)
		}
	}
	if err := discoverdManager.ConnectPeer(peerIPs); err != nil {
		// No peers have working discoverd, so resurrect any available jobs
		resurrect()
	}

	<-make(chan struct{})
}
Example No. 27
func runJob(client controller.Client, config runConfig) error {
	req := &ct.NewJob{
		Args:       config.Args,
		TTY:        config.Stdin == nil && config.Stdout == nil && term.IsTerminal(os.Stdin.Fd()) && term.IsTerminal(os.Stdout.Fd()) && !config.Detached,
		ReleaseID:  config.Release,
		Env:        config.Env,
		ReleaseEnv: config.ReleaseEnv,
		DisableLog: config.DisableLog,
	}

	// ensure slug apps from old clusters use /runner/init
	release, err := client.GetRelease(req.ReleaseID)
	if err != nil {
		return err
	}
	if release.IsGitDeploy() && (len(req.Args) == 0 || req.Args[0] != "/runner/init") {
		req.Args = append([]string{"/runner/init"}, req.Args...)
	}

	// set deprecated Entrypoint and Cmd for old clusters
	if len(req.Args) > 0 {
		req.DeprecatedEntrypoint = []string{req.Args[0]}
	}
	if len(req.Args) > 1 {
		req.DeprecatedCmd = req.Args[1:]
	}

	if config.Stdin == nil {
		config.Stdin = os.Stdin
	}
	if config.Stdout == nil {
		config.Stdout = os.Stdout
	}
	if config.Stderr == nil {
		config.Stderr = os.Stderr
	}
	if req.TTY {
		if req.Env == nil {
			req.Env = make(map[string]string)
		}
		ws, err := term.GetWinsize(os.Stdin.Fd())
		if err != nil {
			return err
		}
		req.Columns = int(ws.Width)
		req.Lines = int(ws.Height)
		req.Env["COLUMNS"] = strconv.Itoa(int(ws.Width))
		req.Env["LINES"] = strconv.Itoa(int(ws.Height))
		req.Env["TERM"] = os.Getenv("TERM")
	}

	if config.Detached {
		job, err := client.RunJobDetached(config.App, req)
		if err != nil {
			return err
		}
		log.Println(job.ID)
		return nil
	}

	rwc, err := client.RunJobAttached(config.App, req)
	if err != nil {
		return err
	}
	defer rwc.Close()
	attachClient := cluster.NewAttachClient(rwc)

	var termState *term.State
	if req.TTY {
		termState, err = term.MakeRaw(os.Stdin.Fd())
		if err != nil {
			return err
		}
		// Restore the terminal if we return without calling os.Exit
		defer term.RestoreTerminal(os.Stdin.Fd(), termState)
		go func() {
			ch := make(chan os.Signal, 1)
			signal.Notify(ch, SIGWINCH)
			for range ch {
				ws, err := term.GetWinsize(os.Stdin.Fd())
				if err != nil {
					return
				}
				attachClient.ResizeTTY(ws.Height, ws.Width)
				attachClient.Signal(int(SIGWINCH))
			}
		}()
	}

	go func() {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
		sig := <-ch
		attachClient.Signal(int(sig.(syscall.Signal)))
		time.Sleep(10 * time.Second)
		attachClient.Signal(int(syscall.SIGKILL))
	}()

	go func() {
		io.Copy(attachClient, config.Stdin)
		attachClient.CloseWrite()
	}()

	childDone := make(chan struct{})
	shutdown.BeforeExit(func() {
		<-childDone
	})
	exitStatus, err := attachClient.Receive(config.Stdout, config.Stderr)
	close(childDone)
	if err != nil {
		return err
	}
	if req.TTY {
		term.RestoreTerminal(os.Stdin.Fd(), termState)
	}
	if config.Exit {
		shutdown.ExitWithCode(exitStatus)
	}
	if exitStatus != 0 {
		return RunExitError(exitStatus)
	}
	return nil
}
Example No. 28
func (d *discoverdWrapper) Register() bool {
	log := d.logger.New("fn", "discoverd.Register")

	var hb discoverd.Heartbeater
	for {
		var err error
		log.Info("registering with service discovery")
		hb, err = discoverd.AddServiceAndRegister(serviceName, ":"+os.Getenv("PORT"))
		if err == nil {
			break
		}
		log.Error("error registering with service discovery", "err", err)
		time.Sleep(time.Second)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	selfAddr := hb.Addr()
	log = log.New("self.addr", selfAddr)

	service := discoverd.NewService(serviceName)
	var leaders chan *discoverd.Instance
	var stream stream.Stream
	connect := func() (err error) {
		log.Info("connecting service leader stream")
		leaders = make(chan *discoverd.Instance)
		stream, err = service.Leaders(leaders)
		if err != nil {
			log.Error("error connecting service leader stream", "err", err)
		}
		return
	}

	go func() {
		for {
			for {
				if err := connect(); err == nil {
					break
				}
				time.Sleep(100 * time.Millisecond)
			}
			for leader := range leaders {
				if leader == nil {
					// a nil leader indicates there are no instances for
					// the service, ignore and wait for an actual leader
					log.Warn("received nil leader event")
					continue
				}
				log.Info("received leader event", "leader.addr", leader.Addr)
				d.leader <- leader.Addr == selfAddr
			}
			log.Warn("service leader stream disconnected", "err", stream.Err())
		}
	}()

	start := time.Now()
	tick := time.Tick(30 * time.Second)
	for {
		select {
		case isLeader := <-d.leader:
			return isLeader
		case <-tick:
			log.Warn("still waiting for current service leader", "duration", time.Since(start))
		}
	}
}
Example No. 29
func main() {
	apiPort := os.Getenv("PORT")
	if apiPort == "" {
		apiPort = "5000"
	}
	var cookieKey *[32]byte
	if key := os.Getenv("COOKIE_KEY"); key != "" {
		res, err := base64.StdEncoding.DecodeString(key)
		if err != nil {
			log.Fatal("error decoding COOKIE_KEY:", err)
		}
		var k [32]byte
		copy(k[:], res)
		cookieKey = &k
	}

	httpAddr := flag.String("httpaddr", ":8080", "http listen address")
	httpsAddr := flag.String("httpsaddr", ":4433", "https listen address")
	tcpIP := flag.String("tcpip", "", "tcp router listen ip")
	tcpRangeStart := flag.Int("tcp-range-start", 3000, "tcp port range start")
	tcpRangeEnd := flag.Int("tcp-range-end", 3500, "tcp port range end")
	apiAddr := flag.String("apiaddr", ":"+apiPort, "api listen address")
	flag.Parse()

	// Will use DISCOVERD environment variable
	d, err := discoverd.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	services := map[string]string{
		"router-api":  *apiAddr,
		"router-http": *httpAddr,
	}
	for service, addr := range services {
		if err := d.Register(service, addr); err != nil {
			log.Fatal(err)
		}
	}

	shutdown.BeforeExit(func() {
		for service, addr := range services {
			discoverd.Unregister(service, addr)
		}
	})

	// Read etcd addresses from ETCD
	etcdAddrs := strings.Split(os.Getenv("ETCD"), ",")
	if len(etcdAddrs) == 1 && etcdAddrs[0] == "" {
		if externalIP := os.Getenv("EXTERNAL_IP"); externalIP != "" {
			etcdAddrs = []string{fmt.Sprintf("http://%s:4001", externalIP)}
		} else {
			etcdAddrs = nil
		}
	}
	etcdc := etcd.NewClient(etcdAddrs)

	prefix := os.Getenv("ETCD_PREFIX")
	if prefix == "" {
		prefix = "/router"
	}
	var r Router
	r.TCP = NewTCPListener(*tcpIP, *tcpRangeStart, *tcpRangeEnd, NewEtcdDataStore(etcdc, path.Join(prefix, "tcp/")), d)
	r.HTTP = NewHTTPListener(*httpAddr, *httpsAddr, cookieKey, NewEtcdDataStore(etcdc, path.Join(prefix, "http/")), d)

	go func() { log.Fatal(r.ListenAndServe(nil)) }()
	log.Fatal(http.ListenAndServe(*apiAddr, apiHandler(&r)))
}
Example No. 30
func main() {
	defer shutdown.Exit()

	port := os.Getenv("PORT")
	if port == "" {
		port = "3000"
	}
	addr := ":" + port

	if seed := os.Getenv("NAME_SEED"); seed != "" {
		s, err := hex.DecodeString(seed)
		if err != nil {
			log.Fatalln("error decoding NAME_SEED:", err)
		}
		name.SetSeed(s)
	}

	db := postgres.Wait("", "")

	if err := migrateDB(db.DB); err != nil {
		shutdown.Fatal(err)
	}

	pgxcfg, err := pgx.ParseURI(fmt.Sprintf("http://%s:%s@%s/%s", os.Getenv("PGUSER"), os.Getenv("PGPASSWORD"), db.Addr(), os.Getenv("PGDATABASE")))
	if err != nil {
		log.Fatal(err)
	}
	pgxcfg.Dial = dialer.Retry.Dial

	pgxpool, err := pgx.NewConnPool(pgx.ConnPoolConfig{
		ConnConfig:   pgxcfg,
		AfterConnect: que.PrepareStatements,
	})
	if err != nil {
		log.Fatal(err)
	}
	shutdown.BeforeExit(func() { pgxpool.Close() })

	lc, err := logaggc.New("")
	if err != nil {
		shutdown.Fatal(err)
	}
	rc := routerc.New()

	hb, err := discoverd.DefaultClient.AddServiceAndRegisterInstance("flynn-controller", &discoverd.Instance{
		Addr:  addr,
		Proto: "http",
		Meta: map[string]string{
			"AUTH_KEY": os.Getenv("AUTH_KEY"),
		},
	})
	if err != nil {
		shutdown.Fatal(err)
	}

	shutdown.BeforeExit(func() {
		hb.Close()
	})

	handler := appHandler(handlerConfig{
		db:      db,
		cc:      clusterClientWrapper{cluster.NewClient()},
		lc:      lc,
		rc:      rc,
		pgxpool: pgxpool,
		keys:    strings.Split(os.Getenv("AUTH_KEY"), ","),
	})
	shutdown.Fatal(http.ListenAndServe(addr, handler))
}