/* ish: the Inexusable/Insecure/Internet SHell. */ func main() { defer shutdown.Exit() name := os.Getenv("NAME") port := os.Getenv("PORT") addr := ":" + port if name == "" { name = "ish-service" } l, err := net.Listen("tcp", addr) if err != nil { shutdown.Fatal(err) } defer l.Close() hb, err := discoverd.AddServiceAndRegister(name, addr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) http.HandleFunc("/ish", ish) if err := http.Serve(l, nil); err != nil { shutdown.Fatal(err) } }
func main() { logger := log15.New("component", "scheduler") logger.SetHandler(log15.LvlFilterHandler(log15.LvlInfo, log15.StdoutHandler)) log := logger.New("fn", "main") log.Info("creating cluster and controller clients") hc := &http.Client{Timeout: 5 * time.Second} clusterClient := utils.ClusterClientWrapper(cluster.NewClientWithHTTP(nil, hc)) controllerClient, err := controller.NewClient("", os.Getenv("AUTH_KEY")) if err != nil { log.Error("error creating controller client", "err", err) shutdown.Fatal(err) } log.Info("waiting for controller API to come up") if _, err := discoverd.GetInstances("controller", 5*time.Minute); err != nil { log.Error("error waiting for controller API", "err", err) shutdown.Fatal(err) } s := NewScheduler(clusterClient, controllerClient, newDiscoverdWrapper(logger), logger) log.Info("started scheduler", "backoffPeriod", s.backoffPeriod) go s.startHTTPServer(os.Getenv("PORT")) if err := s.Run(); err != nil { shutdown.Fatal(err) } shutdown.Exit() }
func main() { defer shutdown.Exit() db := postgres.Wait(&postgres.Conf{ Service: serviceName, User: "******", Password: os.Getenv("PGPASSWORD"), Database: "postgres", }, nil) api := &pgAPI{db} router := httprouter.New() router.POST("/databases", httphelper.WrapHandler(api.createDatabase)) router.DELETE("/databases", httphelper.WrapHandler(api.dropDatabase)) router.GET("/ping", httphelper.WrapHandler(api.ping)) port := os.Getenv("PORT") if port == "" { port = "3000" } addr := ":" + port hb, err := discoverd.AddServiceAndRegister(serviceName+"-api", addr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) handler := httphelper.ContextInjector(serviceName+"-api", httphelper.NewRequestLogger(router)) shutdown.Fatal(http.ListenAndServe(addr, handler)) }
// ConfigureNetworking handles the host API request that supplies the
// host's network configuration. The configuration is applied at most
// once (guarded by networkOnce), and synchronously — before the HTTP
// response is written — see the inline comment for why.
func (h *jobAPI) ConfigureNetworking(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	log := h.host.log.New("fn", "ConfigureNetworking")

	log.Info("decoding config")
	config := &host.NetworkConfig{}
	if err := httphelper.DecodeJSON(r, config); err != nil {
		log.Error("error decoding config", "err", err)
		// NOTE(review): a decode error terminates the whole host process
		// instead of returning a 400 — presumably intentional since the host
		// cannot run without a network config, but worth confirming.
		shutdown.Fatal(err)
	}

	// configure the network before returning a response in case the
	// network coordinator requires the bridge to be created (e.g.
	// when using flannel with the "alloc" backend)
	h.host.networkOnce.Do(func() {
		log.Info("configuring network", "subnet", config.Subnet, "mtu", config.MTU, "resolvers", config.Resolvers)
		if err := h.host.backend.ConfigureNetworking(config); err != nil {
			log.Error("error configuring network", "err", err)
			shutdown.Fatal(err)
		}
		// Publish the applied config in the host status (statusMtx guards it).
		h.host.statusMtx.Lock()
		h.host.status.Network = config
		h.host.statusMtx.Unlock()
	})
}
func main() { defer shutdown.Exit() dsn := &mariadb.DSN{ Host: serviceHost + ":3306", User: "******", Password: os.Getenv("MYSQL_PWD"), Database: "mysql", } db, err := sql.Open("mysql", dsn.String()) api := &API{db} router := httprouter.New() router.POST("/databases", httphelper.WrapHandler(api.createDatabase)) router.DELETE("/databases", httphelper.WrapHandler(api.dropDatabase)) router.GET("/ping", httphelper.WrapHandler(api.ping)) port := os.Getenv("PORT") if port == "" { port = "3000" } addr := ":" + port hb, err := discoverd.AddServiceAndRegister(serviceName+"-api", addr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) handler := httphelper.ContextInjector(serviceName+"-api", httphelper.NewRequestLogger(router)) shutdown.Fatal(http.ListenAndServe(addr, handler)) }
func main() { defer shutdown.Exit() api := &API{} router := httprouter.New() router.POST("/databases", api.createDatabase) router.DELETE("/databases", api.dropDatabase) router.GET("/ping", api.ping) port := os.Getenv("PORT") if port == "" { port = "3000" } addr := ":" + port hb, err := discoverd.AddServiceAndRegister(serviceName+"-api", addr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) handler := httphelper.ContextInjector(serviceName+"-api", httphelper.NewRequestLogger(router)) shutdown.Fatal(http.ListenAndServe(addr, handler)) }
// main boots the HA Postgres appliance (9.5): it loads or creates a stable
// instance ID, registers with discoverd using manual leader election, starts
// the replication state-machine peer, and serves the management HTTP API.
func main() {
	serviceName := os.Getenv("FLYNN_POSTGRES")
	if serviceName == "" {
		serviceName = "postgres"
	}
	singleton := os.Getenv("SINGLETON") == "true"
	password := os.Getenv("PGPASSWORD")
	const dataDir = "/data"

	// Load (or create on first boot) a persistent instance ID so this peer
	// keeps its identity across restarts. A missing file is expected; any
	// other read error is fatal.
	idFile := filepath.Join(dataDir, "instance_id")
	idBytes, err := ioutil.ReadFile(idFile)
	if err != nil && !os.IsNotExist(err) {
		shutdown.Fatalf("error reading instance ID: %s", err)
	}
	id := string(idBytes)
	if len(id) == 0 {
		id = random.UUID()
		if err := ioutil.WriteFile(idFile, []byte(id), 0644); err != nil {
			shutdown.Fatalf("error writing instance ID: %s", err)
		}
	}

	// Manual leader election: the replication state machine (not discoverd)
	// decides which instance is primary. "already exists" is fine — another
	// instance may have created the service first.
	err = discoverd.DefaultClient.AddService(serviceName, &discoverd.ServiceConfig{
		LeaderType: discoverd.LeaderTypeManual,
	})
	if err != nil && !httphelper.IsObjectExistsError(err) {
		shutdown.Fatal(err)
	}
	inst := &discoverd.Instance{
		Addr: ":5432",
		Meta: map[string]string{pgIdKey: id},
	}
	hb, err := discoverd.DefaultClient.RegisterInstance(serviceName, inst)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	log := log15.New("app", "postgres")

	pg := NewPostgres(Config{
		ID:           id,
		Singleton:    singleton,
		DataDir:      filepath.Join(dataDir, "db"),
		BinDir:       "/usr/lib/postgresql/9.5/bin/",
		Password:     password,
		Logger:       log.New("component", "postgres"),
		ExtWhitelist: true,
		WaitUpstream: true,
		SHMType:      "posix",
	})

	// The peer runs the replication state machine in the background; the
	// HTTP API below blocks for the lifetime of the process.
	dd := sd.NewDiscoverd(discoverd.DefaultClient.Service(serviceName), log.New("component", "discoverd"))
	peer := state.NewPeer(inst, id, pgIdKey, singleton, dd, pg, log.New("component", "peer"))
	shutdown.BeforeExit(func() { peer.Close() })
	go peer.Run()

	shutdown.Fatal(ServeHTTP(pg.(*Postgres), peer, hb, log.New("component", "http")))
	// TODO(titanous): clean shutdown of postgres
}
func main() { defer shutdown.Exit() apiPort := os.Getenv("PORT") if apiPort == "" { apiPort = "5000" } logAddr := flag.String("logaddr", ":3000", "syslog input listen address") apiAddr := flag.String("apiaddr", ":"+apiPort, "api listen address") flag.Parse() a := NewAggregator(*logAddr) if err := a.Start(); err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(a.Shutdown) listener, err := reuseport.NewReusablePortListener("tcp4", *apiAddr) if err != nil { shutdown.Fatal(err) } hb, err := discoverd.AddServiceAndRegister("flynn-logaggregator", *logAddr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) shutdown.Fatal(http.Serve(listener, apiHandler(a))) }
func main() { m := NewMain() if err := m.ParseFlags(os.Args[1:]); err != nil { shutdown.Fatal(err) } if err := m.Run(); err != nil { shutdown.Fatal(err) } <-(chan struct{})(nil) }
// main runs the legacy controller scheduler: it watches hosts immediately,
// then blocks until this instance wins leader election before acting on
// formations (so only one scheduler mutates the cluster at a time).
func main() {
	defer shutdown.Exit()

	grohl.AddContext("app", "controller-scheduler")
	grohl.Log(grohl.Data{"at": "start"})

	go startHTTPServer()

	// Optional override of the restart backoff period from the environment.
	if period := os.Getenv("BACKOFF_PERIOD"); period != "" {
		var err error
		backoffPeriod, err = time.ParseDuration(period)
		if err != nil {
			shutdown.Fatal(err)
		}
		grohl.Log(grohl.Data{"at": "backoff_period", "period": backoffPeriod.String()})
	}

	cc, err := controller.NewClient("", os.Getenv("AUTH_KEY"))
	if err != nil {
		shutdown.Fatal(err)
	}
	c := newContext(cc, cluster.NewClient())
	// Host watching starts before leadership: it only observes state.
	c.watchHosts()

	grohl.Log(grohl.Data{"at": "leaderwait"})
	hb, err := discoverd.AddServiceAndRegister("controller-scheduler", ":"+os.Getenv("PORT"))
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	// Block until the leader event names our own registration address.
	leaders := make(chan *discoverd.Instance)
	stream, err := discoverd.NewService("controller-scheduler").Leaders(leaders)
	if err != nil {
		shutdown.Fatal(err)
	}
	for leader := range leaders {
		if leader.Addr == hb.Addr() {
			break
		}
	}
	// If the channel closed instead of us winning, surface the stream error.
	if err := stream.Err(); err != nil {
		// TODO: handle discoverd errors
		shutdown.Fatal(err)
	}
	stream.Close()
	// TODO: handle demotion

	grohl.Log(grohl.Data{"at": "leader"})

	// TODO: periodic full cluster sync for anti-entropy
	c.watchFormations()
}
// main runs the controller background-job workers: it connects to postgres,
// serves a health/discovery endpoint in the background, and processes que
// jobs (deployments, deletions, migrations, cleanup, GC) until shutdown.
func main() {
	log := logger.New("fn", "main")

	log.Info("creating controller client")
	client, err := controller.NewClient("", os.Getenv("AUTH_KEY"))
	if err != nil {
		log.Error("error creating controller client", "err", err)
		shutdown.Fatal(err)
	}

	log.Info("connecting to postgres")
	db := postgres.Wait(nil, schema.PrepareStatements)
	shutdown.BeforeExit(func() { db.Close() })

	// Health endpoint + service registration run in the background; the
	// worker pool below is the real workload.
	go func() {
		status.AddHandler(func() status.Status {
			// "ping" is a prepared statement (see schema.PrepareStatements);
			// a failure means the DB connection is unhealthy.
			_, err := db.ConnPool.Exec("ping")
			if err != nil {
				return status.Unhealthy
			}
			return status.Healthy
		})
		addr := ":" + os.Getenv("PORT")
		hb, err := discoverd.AddServiceAndRegister("controller-worker", addr)
		if err != nil {
			shutdown.Fatal(err)
		}
		shutdown.BeforeExit(func() { hb.Close() })
		shutdown.Fatal(http.ListenAndServe(addr, nil))
	}()

	workers := que.NewWorkerPool(
		que.NewClient(db.ConnPool),
		que.WorkMap{
			"deployment":             deployment.JobHandler(db, client, logger),
			"app_deletion":           app_deletion.JobHandler(db, client, logger),
			"domain_migration":       domain_migration.JobHandler(db, client, logger),
			"release_cleanup":        release_cleanup.JobHandler(db, client, logger),
			"app_garbage_collection": app_garbage_collection.JobHandler(db, client, logger),
		},
		workerCount,
	)
	workers.Interval = 5 * time.Second

	log.Info("starting workers", "count", workerCount, "interval", workers.Interval)
	workers.Start()
	shutdown.BeforeExit(func() { workers.Shutdown() })

	select {} // block and keep running
}
// ConfigureNetworking handles the host API request that supplies the host's
// network configuration, applying it asynchronously and at most once.
func (h *jobAPI) ConfigureNetworking(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	config := &host.NetworkConfig{}
	if err := httphelper.DecodeJSON(r, config); err != nil {
		// NOTE(review): a decode error kills the whole host process rather
		// than returning a 400 — confirm this is intentional.
		shutdown.Fatal(err)
	}
	// NOTE(review): the network is configured in a goroutine, so the HTTP
	// response is written before the bridge necessarily exists; callers that
	// need the bridge immediately cannot rely on this reply (compare the
	// synchronous variant of this handler used elsewhere).
	go h.networkOnce.Do(func() {
		if err := h.host.backend.ConfigureNetworking(config); err != nil {
			shutdown.Fatal(err)
		}
		// Publish the applied config in the host status (statusMtx guards it).
		h.statusMtx.Lock()
		h.status.Network = config
		h.statusMtx.Unlock()
	})
}
// ConnectLocal points the DiscoverdManager at a discoverd instance running on
// the local host at url: it registers this host, replaces any peer-provided
// heartbeat with the local one, exports DISCOVERD to new jobs, and connects
// the log mux. Returns an error if already connected locally or if
// registration fails.
func (d *DiscoverdManager) ConnectLocal(url string) error {
	if d.localConnected() {
		return errors.New("host: discoverd is already configured")
	}
	disc := discoverd.NewClientWithURL(url)
	hb, err := disc.AddServiceAndRegisterInstance("flynn-host", d.inst)
	if err != nil {
		return err
	}

	// Swap in the local heartbeat, closing the peer heartbeat if one exists.
	d.mtx.Lock()
	if d.peer != nil {
		d.peer.Close()
	}
	d.local = hb
	d.mtx.Unlock()

	d.backend.SetDefaultEnv("DISCOVERD", url)

	// Connect the log mux in the background; a failure here is fatal for the
	// whole host process rather than surfaced to the caller.
	go func() {
		if err := d.mux.Connect(disc, "flynn-logaggregator"); err != nil {
			shutdown.Fatal(err)
		}
	}()

	return nil
}
// ConfigureDiscoverd applies a discoverd configuration to the host: it
// persists the discoverd job's slot, connects to service discovery exactly
// once (asynchronously), publishes the config in the host status, and points
// the volume API at the cluster.
func (h *Host) ConfigureDiscoverd(config *host.DiscoverdConfig) {
	log := h.log.New("fn", "ConfigureDiscoverd")
	if config.JobID != "" {
		log.Info("persisting discoverd job_id", "job.id", config.JobID)
		if err := h.state.SetPersistentSlot("discoverd", config.JobID); err != nil {
			// Non-fatal: the host keeps running without the persistent slot.
			log.Error("error assigning discoverd to persistent slot")
		}
	}
	// Connect at most once, in the background; a connection failure is fatal
	// for the whole host process.
	if config.URL != "" && config.DNS != "" {
		go h.discoverdOnce.Do(func() {
			log.Info("connecting to service discovery", "url", config.URL)
			if err := h.discMan.ConnectLocal(config.URL); err != nil {
				log.Error("error connecting to service discovery", "err", err)
				shutdown.Fatal(err)
			}
		})
	}
	// Publish the config in the host status and propagate it to the backend.
	h.statusMtx.Lock()
	h.status.Discoverd = config
	h.backend.SetDiscoverdConfig(h.status.Discoverd)
	h.statusMtx.Unlock()
	if config.URL != "" {
		h.volAPI.ConfigureClusterClient(config.URL)
	}
}
// ConfigureDiscoverd handles the host API request that supplies the discoverd
// URL/DNS configuration: it records the config in the host status and, when
// both URL and DNS are set, connects to service discovery exactly once.
func (h *jobAPI) ConfigureDiscoverd(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	log := h.host.log.New("fn", "ConfigureDiscoverd")

	log.Info("decoding config")
	var config host.DiscoverdConfig
	if err := httphelper.DecodeJSON(r, &config); err != nil {
		log.Error("error decoding config", "err", err)
		httphelper.Error(w, err)
		return
	}
	log.Info("config decoded", "url", config.URL, "dns", config.DNS)

	// Publish the new config in the host status (statusMtx guards it).
	h.host.statusMtx.Lock()
	h.host.status.Discoverd = &config
	h.host.statusMtx.Unlock()

	// Connect in the background and at most once; a connection failure is
	// fatal for the whole host process, not surfaced to this request.
	if config.URL != "" && config.DNS != "" {
		go h.host.discoverdOnce.Do(func() {
			log.Info("connecting to service discovery", "url", config.URL)
			if err := h.host.discMan.ConnectLocal(config.URL); err != nil {
				log.Error("error connecting to service discovery", "err", err)
				shutdown.Fatal(err)
			}
		})
	}
}
// ConfigureNetworking applies a network configuration to the host: it
// persists the flannel job's slot, configures the network backend exactly
// once, and refreshes the JobID in the published status on every call.
func (h *Host) ConfigureNetworking(config *host.NetworkConfig) {
	log := h.log.New("fn", "ConfigureNetworking")
	if config.JobID != "" {
		log.Info("persisting flannel job_id", "job.id", config.JobID)
		if err := h.state.SetPersistentSlot("flannel", config.JobID); err != nil {
			// Non-fatal: the host keeps running without the persistent slot.
			log.Error("error assigning flannel to persistent slot")
		}
	}
	// The backend network is configured at most once per host lifetime.
	h.networkOnce.Do(func() {
		log.Info("configuring network", "subnet", config.Subnet, "mtu", config.MTU, "resolvers", config.Resolvers)
		if err := h.backend.ConfigureNetworking(config); err != nil {
			log.Error("error configuring network", "err", err)
			shutdown.Fatal(err)
		}
		h.statusMtx.Lock()
		h.status.Network = config
		h.statusMtx.Unlock()
	})
	// Even when the network was configured by an earlier call, update the
	// JobID (e.g. after a flannel restart) and propagate to the backend.
	h.statusMtx.Lock()
	h.status.Network.JobID = config.JobID
	h.backend.SetNetworkConfig(h.status.Network)
	h.statusMtx.Unlock()
}
func runClusterMigrateDomain(args *docopt.Args) error { client, err := getClusterClient() if err != nil { shutdown.Fatal(err) } dm := &ct.DomainMigration{ Domain: args.String["<domain>"], } release, err := client.GetAppRelease("controller") if err != nil { return err } dm.OldDomain = release.Env["DEFAULT_ROUTE_DOMAIN"] if !promptYesNo(fmt.Sprintf("Migrate cluster domain from %q to %q?", dm.OldDomain, dm.Domain)) { fmt.Println("Aborted") return nil } maxDuration := 2 * time.Minute fmt.Printf("Migrating cluster domain (this can take up to %s)...\n", maxDuration) events := make(chan *ct.Event) stream, err := client.StreamEvents(controller.StreamEventsOptions{ ObjectTypes: []ct.EventType{ct.EventTypeDomainMigration}, }, events) if err != nil { return nil } defer stream.Close() if err := client.PutDomain(dm); err != nil { return err } timeout := time.After(maxDuration) for { select { case event, ok := <-events: if !ok { return stream.Err() } var e *ct.DomainMigrationEvent if err := json.Unmarshal(event.Data, &e); err != nil { return err } if e.Error != "" { fmt.Println(e.Error) } if e.DomainMigration.FinishedAt != nil { fmt.Printf("Changed cluster domain from %q to %q\n", dm.OldDomain, dm.Domain) return nil } case <-timeout: return errors.New("timed out waiting for domain migration to complete") } } }
func mustApp() string { name, err := app() if err != nil { shutdown.Fatal(err) } return name }
func runCommand(name string, args []string) (err error) { argv := make([]string, 1, 1+len(args)) argv[0] = name argv = append(argv, args...) cmd, ok := commands[name] if !ok { return fmt.Errorf("%s is not a flynn command. See 'flynn help'", name) } parsedArgs, err := docopt.Parse(cmd.usage, argv, true, "", cmd.optsFirst) if err != nil { return err } switch f := cmd.f.(type) { case func(*docopt.Args, *controller.Client) error: // create client and run command client, err := getClusterClient() if err != nil { shutdown.Fatal(err) } return f(parsedArgs, client) case func(*docopt.Args) error: return f(parsedArgs) case func() error: return f() case func(): f() return nil } return fmt.Errorf("unexpected command type %T", cmd.f) }
func runCommand(name string, args []string) (err error) { argv := make([]string, 1, 1+len(args)) argv[0] = name argv = append(argv, args...) cmd, ok := commands[name] if !ok { return fmt.Errorf("%s is not a flynn command. See 'flynn help'", name) } parsedArgs, err := docopt.Parse(cmd.usage, argv, true, "", cmd.optsFirst) if err != nil { return err } switch f := cmd.f.(type) { case func(*docopt.Args, *controller.Client) error: // create client and run command cluster, err := getCluster() if err != nil { shutdown.Fatal(err) } var client *controller.Client if cluster.TLSPin != "" { pin, err := base64.StdEncoding.DecodeString(cluster.TLSPin) if err != nil { log.Fatalln("error decoding tls pin:", err) } client, err = controller.NewClientWithConfig(cluster.URL, cluster.Key, controller.Config{Pin: pin}) } else { client, err = controller.NewClient(cluster.URL, cluster.Key) } if err != nil { shutdown.Fatal(err) } return f(parsedArgs, client) case func(*docopt.Args) error: return f(parsedArgs) case func() error: return f() case func(): f() return nil } return fmt.Errorf("unexpected command type %T", cmd.f) }
func main() { log := logger.New("fn", "main") log.Info("creating controller client") client, err := controller.NewClient("", os.Getenv("AUTH_KEY")) if err != nil { log.Error("error creating controller client", "err", err) shutdown.Fatal() } log.Info("connecting to postgres") db := postgres.Wait("", "") log.Info("creating postgres connection pool") pgxpool, err := pgx.NewConnPool(pgx.ConnPoolConfig{ ConnConfig: pgx.ConnConfig{ Host: os.Getenv("PGHOST"), User: os.Getenv("PGUSER"), Password: os.Getenv("PGPASSWORD"), Database: os.Getenv("PGDATABASE"), }, AfterConnect: que.PrepareStatements, MaxConnections: workerCount, }) if err != nil { log.Error("error creating postgres connection pool", "err", err) shutdown.Fatal() } shutdown.BeforeExit(func() { pgxpool.Close() }) workers := que.NewWorkerPool( que.NewClient(pgxpool), que.WorkMap{ "deployment": deployment.JobHandler(db, client, logger), "app_deletion": app_deletion.JobHandler(db, client, logger), }, workerCount, ) workers.Interval = 5 * time.Second log.Info("starting workers", "count", workerCount, "interval", workers.Interval) workers.Start() shutdown.BeforeExit(func() { workers.Shutdown() }) select {} // block and keep running }
func main() { defer shutdown.Exit() apiPort := os.Getenv("PORT") if apiPort == "" { apiPort = "5000" } logAddr := flag.String("logaddr", ":3000", "syslog input listen address") apiAddr := flag.String("apiaddr", ":"+apiPort, "api listen address") snapshotPath := flag.String("snapshot", "", "snapshot path") flag.Parse() a := NewAggregator(*logAddr) if *snapshotPath != "" { if err := a.ReplaySnapshot(*snapshotPath); err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { if err := a.TakeSnapshot(*snapshotPath); err != nil { log15.Error("snapshot error", "err", err) } }) } if err := a.Start(); err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(a.Shutdown) listener, err := net.Listen("tcp4", *apiAddr) if err != nil { shutdown.Fatal(err) } hb, err := discoverd.AddServiceAndRegister("flynn-logaggregator", *logAddr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) shutdown.Fatal(http.Serve(listener, apiHandler(a))) }
func main() { defer shutdown.Exit() flag.Parse() addr := os.Getenv("PORT") if addr == "" { addr = *listenPort } addr = ":" + addr var fs Filesystem var storageDesc string if *storageDir != "" { fs = NewOSFilesystem(*storageDir) storageDesc = *storageDir } else { db, err := postgres.Open("", "") if err != nil { shutdown.Fatal(err) } fs, err = NewPostgresFilesystem(db.DB) if err != nil { shutdown.Fatal(err) } storageDesc = "Postgres" } if *serviceDiscovery { hb, err := discoverd.AddServiceAndRegister("blobstore", addr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) } log.Println("Blobstore serving files on " + addr + " from " + storageDesc) http.Handle("/", handler(fs)) status.AddHandler(fs.Status) shutdown.Fatal(http.ListenAndServe(addr, nil)) }
func main() { defer shutdown.Exit() flag.Parse() addr := os.Getenv("PORT") if addr == "" { addr = *listenPort } addr = ":" + addr var fs Filesystem var storageDesc string if *storageDir != "" { fs = NewOSFilesystem(*storageDir) storageDesc = *storageDir } else { var err error db := postgres.Wait(nil, nil) fs, err = NewPostgresFilesystem(db) if err != nil { shutdown.Fatal(err) } storageDesc = "Postgres" } if *serviceDiscovery { hb, err := discoverd.AddServiceAndRegister("blobstore", addr) if err != nil { shutdown.Fatal(err) } shutdown.BeforeExit(func() { hb.Close() }) } log.Println("Blobstore serving files on " + addr + " from " + storageDesc) mux := http.NewServeMux() mux.Handle("/", handler(fs)) mux.Handle(status.Path, status.Handler(fs.Status)) h := httphelper.ContextInjector("blobstore", httphelper.NewRequestLogger(mux)) shutdown.Fatal(http.ListenAndServe(addr, h)) }
// ConfigureNetworking handles the host API request that supplies the host's
// network configuration, applying it synchronously and at most once before
// the HTTP response is written.
func (h *jobAPI) ConfigureNetworking(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
	config := &host.NetworkConfig{}
	if err := httphelper.DecodeJSON(r, config); err != nil {
		// NOTE(review): a decode error terminates the whole host process
		// instead of returning a 400 — confirm this is intentional.
		shutdown.Fatal(err)
	}
	// configure the network before returning a response in case the
	// network coordinator requires the bridge to be created (e.g.
	// when using flannel with the "alloc" backend)
	h.networkOnce.Do(func() {
		if err := h.host.backend.ConfigureNetworking(config); err != nil {
			shutdown.Fatal(err)
		}
		// Publish the applied config in the host status (statusMtx guards it).
		h.host.statusMtx.Lock()
		h.host.status.Network = config
		h.host.statusMtx.Unlock()
	})
}
// main boots the HA Postgres appliance (9.4): it registers with discoverd
// using manual leader election, starts the replication state-machine peer,
// and serves the management HTTP API.
func main() {
	serviceName := os.Getenv("FLYNN_POSTGRES")
	if serviceName == "" {
		serviceName = "postgres"
	}
	singleton := os.Getenv("SINGLETON") == "true"
	password := os.Getenv("PGPASSWORD")

	// Manual leader election: the replication state machine (not discoverd)
	// decides which instance is primary. "already exists" is fine — another
	// instance may have created the service first.
	err := discoverd.DefaultClient.AddService(serviceName, &discoverd.ServiceConfig{
		LeaderType: discoverd.LeaderTypeManual,
	})
	if err != nil && !httphelper.IsObjectExistsError(err) {
		shutdown.Fatal(err)
	}
	inst := &discoverd.Instance{Addr: ":5432"}
	hb, err := discoverd.DefaultClient.RegisterInstance(serviceName, inst)
	if err != nil {
		shutdown.Fatal(err)
	}
	shutdown.BeforeExit(func() { hb.Close() })

	log := log15.New("app", "postgres")

	pg := NewPostgres(Config{
		ID:           inst.ID,
		Singleton:    singleton,
		BinDir:       "/usr/lib/postgresql/9.4/bin/",
		Password:     password,
		Logger:       log.New("component", "postgres"),
		ExtWhitelist: true,
		WaitUpstream: true,
		// TODO(titanous) investigate this:
		SHMType: "sysv", // the default on 9.4, 'posix' is not currently supported in our containers
	})

	// The peer runs the replication state machine in the background; the
	// HTTP API below blocks for the lifetime of the process.
	dd := NewDiscoverd(discoverd.DefaultClient.Service(serviceName), log.New("component", "discoverd"))
	peer := state.NewPeer(inst, singleton, dd, pg, log.New("component", "peer"))
	shutdown.BeforeExit(func() { peer.Close() })
	go peer.Run()

	shutdown.Fatal(ServeHTTP(pg.(*Postgres), peer, hb, log.New("component", "http")))
	// TODO(titanous): clean shutdown of postgres
}
// Wait blocks until the Postgres cluster named by conf is ready for
// read-write use, then returns an open *DB. Readiness means: the discoverd
// service meta reports a singleton or a sync replica (i.e. a writable
// primary), a connection can be established (with retries), and the server
// accepts read-write transactions. conf may be nil, in which case it is
// built from the PG* environment variables.
func Wait(conf *Conf, afterConn func(*pgx.Conn) error) *DB {
	if conf == nil {
		conf = &Conf{
			Service:  os.Getenv("FLYNN_POSTGRES"),
			User:     os.Getenv("PGUSER"),
			Password: os.Getenv("PGPASSWORD"),
			Database: os.Getenv("PGDATABASE"),
		}
	}
	if conf.Discoverd == nil {
		conf.Discoverd = discoverd.DefaultClient
	}

	events := make(chan *discoverd.Event)
	stream, err := conf.Discoverd.Service(conf.Service).Watch(events)
	if err != nil {
		shutdown.Fatal(err)
	}
	// wait for service meta that has sync or singleton primary
	for e := range events {
		if e.Kind&discoverd.EventKindServiceMeta == 0 || e.ServiceMeta == nil || len(e.ServiceMeta.Data) == 0 {
			continue
		}
		state := &state.State{}
		// Unmarshal errors are deliberately ignored: a zero-value state fails
		// both checks below and we simply keep waiting for valid meta.
		json.Unmarshal(e.ServiceMeta.Data, state)
		if state.Singleton || state.Sync != nil {
			break
		}
	}
	stream.Close()
	// TODO(titanous): handle discoverd disconnection

	// retry here as authentication may fail if DB is still
	// starting up.
	// TODO(jpg): switch this to use pgmanager to check if user
	// exists, we can also check for r/w with pgmanager
	var db *DB
	err = connectAttempts.Run(func() error {
		db, err = Open(conf, afterConn)
		return err
	})
	if err != nil {
		panic(err)
	}
	for {
		var readonly string
		// wait until read-write transactions are allowed
		if err := db.QueryRow("SHOW default_transaction_read_only").Scan(&readonly); err != nil || readonly == "on" {
			time.Sleep(100 * time.Millisecond)
			// TODO(titanous): add max wait here
			continue
		}
		return db
	}
}
func main() { log := logger.New("fn", "main") log.Info("creating cluster and controller clients") hc := &http.Client{Timeout: 5 * time.Second} clusterClient := utils.ClusterClientWrapper(cluster.NewClientWithHTTP(nil, hc)) controllerClient, err := controller.NewClient("", os.Getenv("AUTH_KEY")) if err != nil { log.Error("error creating controller client", "err", err) shutdown.Fatal(err) } s := NewScheduler(clusterClient, controllerClient, newDiscoverdWrapper()) log.Info("started scheduler", "backoffPeriod", s.backoffPeriod) go s.startHTTPServer(os.Getenv("PORT")) if err := s.Run(); err != nil { shutdown.Fatal(err) } shutdown.Exit() }
func main() { defer shutdown.Exit() port := os.Getenv("PORT") addr := ":" + port l, err := net.Listen("tcp", addr) if err != nil { shutdown.Fatal(err) } defer l.Close() log.Println("Listening on", addr) for { conn, err := l.Accept() if err != nil { shutdown.Fatal(err) } go handle(conn) } }
func main() { defer shutdown.Exit() runner := &Runner{ bc: args.BootConfig, events: make(chan Event), networks: make(map[string]struct{}), buildCh: make(chan struct{}, maxConcurrentBuilds), clusters: make(map[string]*cluster.Cluster), ircMsgs: make(chan string), } if err := runner.start(); err != nil { shutdown.Fatal(err) } }