func main() {
    flag.StringVar(&getIPCommand, "cmd", getIPCommand, "Command to get external IP")
    flag.StringVar(&zoneID, "zone", zoneID, "Cloudflare Zone ID")
    flag.StringVar(&name, "name", name, "DNS record to update")
    flag.StringVar(&authEmail, "email", authEmail, "Cloudflare Auth Email")
    flag.StringVar(&authKey, "key", authKey, "Cloudflare API Key")
    flag.IntVar(&ttl, "ttl", ttl, "DNS record TTL (seconds)")
    flag.DurationVar(&interval, "intv", interval, "External IP check interval")
    flag.Parse()

    if name == "" {
        fmt.Println("Option -name is mandatory")
        os.Exit(1)
    }
    if authEmail == "" {
        fmt.Println("Option -email is mandatory")
        os.Exit(1)
    }
    if authKey == "" {
        fmt.Println("Option -key is mandatory")
        os.Exit(1)
    }

    client := cfdns.NewClient(authEmail, authKey)

    main := suture.NewSimple("main")

    checker := newIPChecker(getIPCommand, interval)
    main.Add(checker)

    updater := newDNSUpdater(name, client, checker.changes)
    main.Add(updater)

    main.Serve()
}
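suture only asks that checker and updater satisfy its two-method Service interface. A minimal sketch of the shape a service like newIPChecker's result could take, assuming the classic suture v1/v2 API; the ipChecker fields and loop body here are illustrative guesses, not the original implementation:

import "time"

// Hypothetical service shape; only the Serve/Stop pair is dictated by suture.
type ipChecker struct {
    cmd      string
    interval time.Duration
    changes  chan string   // emits the external IP when it changes
    stop     chan struct{} // closed by Stop to end Serve
}

// Serve runs until Stop is called; suture logs and restarts the service
// if Serve returns early or panics.
func (c *ipChecker) Serve() {
    t := time.NewTicker(c.interval)
    defer t.Stop()
    for {
        select {
        case <-t.C:
            // Run c.cmd, diff against the last seen IP, and send any new
            // address on c.changes for the DNS updater to consume.
        case <-c.stop:
            return
        }
    }
}

func (c *ipChecker) Stop() {
    close(c.stop)
}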
func startHTTP(cfg *mockedConfig) (string, error) {
    model := new(mockedModel)
    httpsCertFile := "../../test/h1/https-cert.pem"
    httpsKeyFile := "../../test/h1/https-key.pem"
    assetDir := "../../gui"
    eventSub := new(mockedEventSub)
    discoverer := new(mockedCachingMux)
    connections := new(mockedConnections)
    errorLog := new(mockedLoggerRecorder)
    systemLog := new(mockedLoggerRecorder)
    addrChan := make(chan string)

    // Instantiate the API service
    svc := newAPIService(protocol.LocalDeviceID, cfg, httpsCertFile, httpsKeyFile, assetDir, model, eventSub, discoverer, connections, errorLog, systemLog)
    svc.started = addrChan

    // Actually start the API service
    supervisor := suture.NewSimple("API test")
    supervisor.Add(svc)
    supervisor.ServeBackground()

    // Make sure the API service is listening, and get the URL to use.
    addr := <-addrChan
    tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
    if err != nil {
        return "", fmt.Errorf("weird address from API service: %v", err)
    }

    baseURL := fmt.Sprintf("http://127.0.0.1:%d", tcpAddr.Port)
    return baseURL, nil
}
func NewLocal(id protocol.DeviceID, addr string, addrList AddressLister, relayStat RelayStatusProvider) (FinderService, error) {
    c := &localClient{
        Supervisor:      suture.NewSimple("local"),
        myID:            id,
        addrList:        addrList,
        relayStat:       relayStat,
        localBcastTick:  time.Tick(BroadcastInterval),
        forcedBcastTick: make(chan time.Time),
        localBcastStart: time.Now(),
        cache:           newCache(),
    }

    host, port, err := net.SplitHostPort(addr)
    if err != nil {
        return nil, err
    }

    if len(host) == 0 {
        // A broadcast client
        c.name = "IPv4 local"
        bcPort, err := strconv.Atoi(port)
        if err != nil {
            return nil, err
        }
        c.startLocalIPv4Broadcasts(bcPort)
    } else {
        // A multicast client
        c.name = "IPv6 local"
        c.startLocalIPv6Multicasts(addr)
    }

    go c.sendLocalAnnouncements()

    return c, nil
}
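Two small design choices above are worth a note: time.Tick suits localBcastTick because the client lives for the whole process (the ticker it allocates is never stopped, which would leak in shorter-lived code), and forcedBcastTick being a plain chan time.Time suggests other code can force an immediate announcement out of band, presumably along the lines of the hypothetical:

    c.forcedBcastTick <- time.Now() // assumed caller forcing a broadcast now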
func newConnectionSvc(cfg *config.Wrapper, myID protocol.DeviceID, mdl *model.Model, tlsCfg *tls.Config, discoverer discover.Finder, relaySvc *relay.Svc) *connectionSvc {
    svc := &connectionSvc{
        Supervisor:     suture.NewSimple("connectionSvc"),
        cfg:            cfg,
        myID:           myID,
        model:          mdl,
        tlsCfg:         tlsCfg,
        discoverer:     discoverer,
        relaySvc:       relaySvc,
        conns:          make(chan model.IntermediateConnection),
        connType:       make(map[protocol.DeviceID]model.ConnectionType),
        relaysEnabled:  cfg.Options().RelaysEnabled,
        lastRelayCheck: make(map[protocol.DeviceID]time.Time),
    }
    cfg.Subscribe(svc)

    // There are several moving parts here; one routine per listening address
    // to handle incoming connections, one routine to periodically attempt
    // outgoing connections, and one routine to do the common handling
    // regardless of whether the connection was incoming or outgoing.
    // Furthermore, a relay service which handles incoming requests to connect
    // via the relays.
    //
    // TODO: Clean shutdown, and/or handling config changes on the fly. We
    // partly do this now - new devices and addresses will be picked up, but
    // not new listen addresses and we don't support disconnecting devices
    // that are removed and so on...

    svc.Add(serviceFunc(svc.connect))
    for _, addr := range svc.cfg.Options().ListenAddress {
        uri, err := url.Parse(addr)
        if err != nil {
            l.Infoln("Failed to parse listen address:", addr, err)
            continue
        }

        listener, ok := listeners[uri.Scheme]
        if !ok {
            l.Infoln("Unknown listen address scheme:", uri.String())
            continue
        }

        if debugNet {
            l.Debugln("listening on", uri.String())
        }

        svc.Add(serviceFunc(func() {
            listener(uri, svc.tlsCfg, svc.conns)
        }))
    }
    svc.Add(serviceFunc(svc.handle))

    if svc.relaySvc != nil {
        svc.Add(serviceFunc(svc.acceptRelayConns))
    }

    return svc
}
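serviceFunc, used here and in several of the constructors that follow, is evidently a small adapter that lets a bare function act as a suture service. Its definition is not part of these excerpts; a sketch consistent with the call sites:

// Assumed adapter: Serve runs the wrapped function to completion and Stop
// is a no-op, so shutdown has to happen through other signals (channel
// closes, config unsubscription, and so on).
type serviceFunc func()

func (f serviceFunc) Serve() { f() }
func (f serviceFunc) Stop()  {}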
func newConnectionSvc(cfg *config.Wrapper, myID protocol.DeviceID, model *model.Model, tlsCfg *tls.Config) *connectionSvc {
    svc := &connectionSvc{
        Supervisor: suture.NewSimple("connectionSvc"),
        cfg:        cfg,
        myID:       myID,
        model:      model,
        tlsCfg:     tlsCfg,
        conns:      make(chan *tls.Conn),
    }

    // There are several moving parts here; one routine per listening address
    // to handle incoming connections, one routine to periodically attempt
    // outgoing connections, and lastly one routine to do the common handling
    // regardless of whether the connection was incoming or outgoing. It ends
    // up as in the diagram below. We embed a Supervisor to manage the
    // routines (i.e. log and restart if they crash or exit, etc).
    //
    //                 +-----------------+
    //     Incoming    | +---------------+-+        +-----------------+
    //    Connections  | |               | |        |                 |    Outgoing
    //  -------------->| |  svc.listen   | |        |                 |   Connections
    //                 | | (1 per listen | |        |   svc.connect   |-------------->
    //                 | |    address)   | |        |                 |
    //                 +-+ |             | |        |                 |
    //                   +-----------------+        +-----------------+
    //                            v                          v
    //                            |                          |
    //                            |                          |
    //                            +------------+-------------+
    //                                         |
    //                                         | svc.conns
    //                                         v
    //                               +-----------------+
    //                               |                 |
    //                               |                 |
    //                               |   svc.handle    |------> model.AddConnection()
    //                               |                 |
    //                               |                 |
    //                               +-----------------+
    //
    // TODO: Clean shutdown, and/or handling config changes on the fly. We
    // partly do this now - new devices and addresses will be picked up, but
    // not new listen addresses and we don't support disconnecting devices
    // that are removed and so on...

    svc.Add(serviceFunc(svc.connect))
    for _, addr := range svc.cfg.Options().ListenAddress {
        addr := addr // capture the range variable for the closure
        listener := serviceFunc(func() {
            svc.listen(addr)
        })
        svc.Add(listener)
    }
    svc.Add(serviceFunc(svc.handle))

    return svc
}
func main() {
    const (
        cleanIntv = 1 * time.Hour
        statsIntv = 5 * time.Minute
    )

    var listen string

    log.SetOutput(os.Stdout)
    log.SetFlags(0)

    flag.StringVar(&listen, "listen", ":22027", "Listen address")
    flag.IntVar(&lruSize, "limit-cache", lruSize, "Limiter cache entries")
    flag.IntVar(&limitAvg, "limit-avg", limitAvg, "Allowed average packet rate, per 10 s")
    flag.IntVar(&limitBurst, "limit-burst", limitBurst, "Allowed burst size, packets")
    flag.StringVar(&statsFile, "stats-file", statsFile, "File to write periodic operation stats to")
    flag.StringVar(&backend, "db-backend", backend, "Database backend to use")
    flag.StringVar(&dsn, "db-dsn", dsn, "Database DSN")
    flag.Parse()

    addr, err := net.ResolveUDPAddr("udp", listen)
    if err != nil {
        log.Fatalln("ResolveUDPAddr:", err)
    }

    db, err := sql.Open(backend, dsn)
    if err != nil {
        log.Fatalln("sql.Open:", err)
    }
    prep, err := setup(backend, db)
    if err != nil {
        log.Fatalln("Setup:", err)
    }

    main := suture.NewSimple("main")
    main.Add(&querysrv{
        addr: addr,
        db:   db,
        prep: prep,
    })
    main.Add(&cleansrv{
        intv: cleanIntv,
        db:   db,
        prep: prep,
    })
    main.Add(&statssrv{
        intv: statsIntv,
        file: statsFile,
        db:   db,
    })

    globalStats.Reset()
    main.Serve()
}
func NewService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *tls.Config, discoverer discover.Finder, bepProtocolName string, tlsDefaultCommonName string, lans []*net.IPNet) *Service {
    service := &Service{
        Supervisor:           suture.NewSimple("connections.Service"),
        cfg:                  cfg,
        myID:                 myID,
        model:                mdl,
        tlsCfg:               tlsCfg,
        discoverer:           discoverer,
        conns:                make(chan IntermediateConnection),
        bepProtocolName:      bepProtocolName,
        tlsDefaultCommonName: tlsDefaultCommonName,
        lans:                 lans,
        natService:           nat.NewService(myID, cfg),
        listenersMut:         sync.NewRWMutex(),
        listeners:            make(map[string]genericListener),
        listenerTokens:       make(map[string]suture.ServiceToken),
        curConMut:            sync.NewMutex(),
        currentConnection:    make(map[protocol.DeviceID]Connection),
    }
    cfg.Subscribe(service)

    // The rate variables are in KiB/s in the UI (despite the camel casing
    // of the name). We multiply by 1024 here to get B/s.
    options := service.cfg.Options()
    if options.MaxSendKbps > 0 {
        service.writeRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxSendKbps), int64(5*1024*options.MaxSendKbps))
    }
    if options.MaxRecvKbps > 0 {
        service.readRateLimit = ratelimit.NewBucketWithRate(float64(1024*options.MaxRecvKbps), int64(5*1024*options.MaxRecvKbps))
    }

    // There are several moving parts here; one routine per listening address
    // (started as part of configuration changes) to handle incoming
    // connections, one routine to periodically attempt outgoing connections,
    // and one routine to do the common handling regardless of whether the
    // connection was incoming or outgoing.

    service.Add(serviceFunc(service.connect))
    service.Add(serviceFunc(service.handle))

    raw := cfg.Raw()
    // Actually starts the listeners and NAT service
    service.CommitConfiguration(raw, raw)

    return service
}
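Note the bootstrap trick at the end: calling CommitConfiguration with identical from and to configurations reuses the config-change handler to start the listeners and NAT service, so initial startup and live reconfiguration share a single code path instead of the per-address loop the earlier variants carried in the constructor.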
func (m *usageReportingManager) CommitConfiguration(from, to config.Configuration) bool {
    if to.Options.URAccepted >= usageReportVersion && m.sup == nil {
        // Usage reporting was turned on; let's start it.
        service := newUsageReportingService(m.cfg, m.model)
        m.sup = suture.NewSimple("usageReporting")
        m.sup.Add(service)
        m.sup.ServeBackground()
    } else if to.Options.URAccepted < usageReportVersion && m.sup != nil {
        // Usage reporting was turned off
        m.sup.Stop()
        m.sup = nil
    }

    return true
}
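This is a tidy on/off pattern for an optional subsystem: ServeBackground starts the supervision loop in its own goroutine and returns immediately, Stop tears the supervised service down, and using m.sup == nil as the state flag makes repeated config commits idempotent in either direction.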
func newFolderSummarySvc(m *model.Model) *folderSummarySvc {
    svc := &folderSummarySvc{
        Supervisor:      suture.NewSimple("folderSummarySvc"),
        model:           m,
        stop:            make(chan struct{}),
        immediate:       make(chan string),
        folders:         make(map[string]struct{}),
        foldersMut:      sync.NewMutex(),
        lastEventReqMut: sync.NewMutex(),
    }

    svc.Add(serviceFunc(svc.listenForUpdates))
    svc.Add(serviceFunc(svc.calculateSummaries))

    return svc
}
func newFolderSummaryService(cfg *config.Wrapper, m *model.Model) *folderSummaryService {
    service := &folderSummaryService{
        Supervisor:      suture.NewSimple("folderSummaryService"),
        cfg:             cfg,
        model:           m,
        stop:            make(chan struct{}),
        immediate:       make(chan string),
        folders:         make(map[string]struct{}),
        foldersMut:      sync.NewMutex(),
        lastEventReqMut: sync.NewMutex(),
    }

    service.Add(serviceFunc(service.listenForUpdates))
    service.Add(serviceFunc(service.calculateSummaries))

    return service
}
func main() {
    // Instantiate a SIFT server
    server, err := sift.NewServer(sift.DefaultDBFilepath)
    if err != nil {
        panic(err)
    }
    if err = server.AddDefaults(); err != nil {
        panic(err)
    }

    // Start the server as a suture process. ServeBackground already runs
    // the supervisor in its own goroutine, so no extra go statement is needed.
    supervisor := suture.NewSimple("movies and chill (SIFT app)")
    servToken := supervisor.Add(server)
    defer supervisor.Remove(servToken)
    supervisor.ServeBackground()

    // Run the SIFT script
    moviesAndChill(server)
}
func TestStopAfterBrokenConfig(t *testing.T) { baseDirs["config"] = "../../test/h1" // to load HTTPS keys expandLocations() cfg := config.Configuration{ GUI: config.GUIConfiguration{ RawAddress: "127.0.0.1:0", RawUseTLS: false, }, } w := config.Wrap("/dev/null", cfg) srv, err := newAPIService(protocol.LocalDeviceID, w, "", nil, nil, nil, nil, nil, nil) if err != nil { t.Fatal(err) } srv.started = make(chan struct{}) sup := suture.NewSimple("test") sup.Add(srv) sup.ServeBackground() <-srv.started // Service is now running, listening on a random port on localhost. Now we // request a config change to a completely invalid listen address. The // commit will fail and the service will be in a broken state. newCfg := config.Configuration{ GUI: config.GUIConfiguration{ RawAddress: "totally not a valid address", RawUseTLS: false, }, } if srv.CommitConfiguration(cfg, newCfg) { t.Fatal("Config commit should have failed") } // Nonetheless, it should be fine to Stop() it without panic. sup.Stop() }
func main() { sift.SetLogLevel("debug") //ipv4.Log.SetHandler(log.DiscardHandler()) // ignore ipv4 scanner stuff // Instantiate a SIFT server server, err := sift.NewServer(sift.DefaultDBFilepath) if err != nil { panic(err) } if err = server.AddDefaults(); err != nil { panic(err) } // Start the server as a suture process supervisor := suture.NewSimple("repeat to console (SIFT app)") servToken := supervisor.Add(server) defer supervisor.Remove(servToken) go supervisor.ServeBackground() // Run the SIFT script repeatUpdatesToConsole(server) }
func TestStopAfterBrokenConfig(t *testing.T) {
    cfg := config.Configuration{
        GUI: config.GUIConfiguration{
            RawAddress: "127.0.0.1:0",
            RawUseTLS:  false,
        },
    }
    w := config.Wrap("/dev/null", cfg)

    srv := newAPIService(protocol.LocalDeviceID, w, "../../test/h1/https-cert.pem", "../../test/h1/https-key.pem", "", nil, nil, nil, nil, nil, nil)
    srv.started = make(chan string)

    sup := suture.NewSimple("test")
    sup.Add(srv)
    sup.ServeBackground()

    <-srv.started

    // Service is now running, listening on a random port on localhost. Now we
    // request a config change to a completely invalid listen address. The
    // commit will fail and the service will be in a broken state.
    newCfg := config.Configuration{
        GUI: config.GUIConfiguration{
            RawAddress: "totally not a valid address",
            RawUseTLS:  false,
        },
    }
    if err := srv.VerifyConfiguration(cfg, newCfg); err == nil {
        t.Fatal("Verify config should have failed")
    }

    // Nonetheless, it should be fine to Stop() it without panic.
    sup.Stop()
}
func main() {
    const (
        cleanIntv = 1 * time.Hour
        statsIntv = 5 * time.Minute
    )

    var listen string

    log.SetOutput(os.Stdout)
    log.SetFlags(0)

    flag.StringVar(&listen, "listen", ":8443", "Listen address")
    flag.IntVar(&lruSize, "limit-cache", lruSize, "Limiter cache entries")
    flag.IntVar(&limitAvg, "limit-avg", limitAvg, "Allowed average packet rate, per 10 s")
    flag.IntVar(&limitBurst, "limit-burst", limitBurst, "Allowed burst size, packets")
    flag.StringVar(&statsFile, "stats-file", statsFile, "File to write periodic operation stats to")
    flag.StringVar(&backend, "db-backend", backend, "Database backend to use")
    flag.StringVar(&dsn, "db-dsn", dsn, "Database DSN")
    flag.StringVar(&certFile, "cert", certFile, "Certificate file")
    flag.StringVar(&keyFile, "key", keyFile, "Key file")
    flag.BoolVar(&debug, "debug", debug, "Debug")
    flag.Parse()

    cert, err := tls.LoadX509KeyPair(certFile, keyFile)
    if err != nil {
        log.Fatalln("Failed to load X509 key pair:", err)
    }

    db, err := sql.Open(backend, dsn)
    if err != nil {
        log.Fatalln("sql.Open:", err)
    }
    prep, err := setup(backend, db)
    if err != nil {
        log.Fatalln("Setup:", err)
    }

    main := suture.NewSimple("main")
    main.Add(&querysrv{
        addr: listen,
        cert: cert,
        db:   db,
        prep: prep,
    })
    main.Add(&cleansrv{
        intv: cleanIntv,
        db:   db,
        prep: prep,
    })
    main.Add(&statssrv{
        intv: statsIntv,
        file: statsFile,
        db:   db,
    })

    globalStats.Reset()
    main.Serve()
}
func NewConnectionService(cfg *config.Wrapper, myID protocol.DeviceID, mdl Model, tlsCfg *tls.Config, discoverer discover.Finder, upnpService *upnp.Service, relayService relay.Service, bepProtocolName string, tlsDefaultCommonName string, lans []*net.IPNet) *Service {
    service := &Service{
        Supervisor:           suture.NewSimple("connections.Service"),
        cfg:                  cfg,
        myID:                 myID,
        model:                mdl,
        tlsCfg:               tlsCfg,
        discoverer:           discoverer,
        upnpService:          upnpService,
        relayService:         relayService,
        conns:                make(chan model.IntermediateConnection),
        bepProtocolName:      bepProtocolName,
        tlsDefaultCommonName: tlsDefaultCommonName,
        lans:                 lans,
        connType:             make(map[protocol.DeviceID]model.ConnectionType),
        relaysEnabled:        cfg.Options().RelaysEnabled,
        lastRelayCheck:       make(map[protocol.DeviceID]time.Time),
    }
    cfg.Subscribe(service)

    // The rate variables are in KiB/s in the UI (despite the camel casing
    // of the name). We multiply by 1024 here to get B/s.
    if service.cfg.Options().MaxSendKbps > 0 {
        service.writeRateLimit = ratelimit.NewBucketWithRate(float64(1024*service.cfg.Options().MaxSendKbps), int64(5*1024*service.cfg.Options().MaxSendKbps))
    }
    if service.cfg.Options().MaxRecvKbps > 0 {
        service.readRateLimit = ratelimit.NewBucketWithRate(float64(1024*service.cfg.Options().MaxRecvKbps), int64(5*1024*service.cfg.Options().MaxRecvKbps))
    }

    // There are several moving parts here; one routine per listening address
    // to handle incoming connections, one routine to periodically attempt
    // outgoing connections, and one routine to do the common handling
    // regardless of whether the connection was incoming or outgoing.
    // Furthermore, a relay service which handles incoming requests to connect
    // via the relays.
    //
    // TODO: Clean shutdown, and/or handling config changes on the fly. We
    // partly do this now - new devices and addresses will be picked up, but
    // not new listen addresses and we don't support disconnecting devices
    // that are removed and so on...

    service.Add(serviceFunc(service.connect))
    for _, addr := range service.cfg.Options().ListenAddress {
        uri, err := url.Parse(addr)
        if err != nil {
            l.Infoln("Failed to parse listen address:", addr, err)
            continue
        }

        listener, ok := listeners[uri.Scheme]
        if !ok {
            l.Infoln("Unknown listen address scheme:", uri.String())
            continue
        }

        l.Debugln("listening on", uri)

        service.Add(serviceFunc(func() {
            listener(uri, service.tlsCfg, service.conns)
        }))
    }
    service.Add(serviceFunc(service.handle))

    if service.relayService != nil {
        service.Add(serviceFunc(service.acceptRelayConns))
    }

    return service
}
func TestAPIServiceRequests(t *testing.T) {
    model := new(mockedModel)
    cfg := new(mockedConfig)
    httpsCertFile := "../../test/h1/https-cert.pem"
    httpsKeyFile := "../../test/h1/https-key.pem"
    assetDir := "../../gui"
    eventSub := new(mockedEventSub)
    discoverer := new(mockedCachingMux)
    relayService := new(mockedRelayService)
    errorLog := new(mockedLoggerRecorder)
    systemLog := new(mockedLoggerRecorder)

    // Instantiate the API service
    svc, err := newAPIService(protocol.LocalDeviceID, cfg, httpsCertFile, httpsKeyFile, assetDir, model, eventSub, discoverer, relayService, errorLog, systemLog)
    if err != nil {
        t.Fatal(err)
    }

    // Make sure the API service is listening, and get the URL to use.
    addr := svc.listener.Addr()
    if addr == nil {
        t.Fatal("Nil listening address from API service")
    }
    tcpAddr, err := net.ResolveTCPAddr("tcp", addr.String())
    if err != nil {
        t.Fatal("Weird address from API service:", err)
    }
    baseURL := fmt.Sprintf("http://127.0.0.1:%d", tcpAddr.Port)

    // Actually start the API service
    supervisor := suture.NewSimple("API test")
    supervisor.Add(svc)
    supervisor.ServeBackground()

    cases := []httpTestCase{
        // /rest/db
        {URL: "/rest/db/completion?device=" + protocol.LocalDeviceID.String() + "&folder=default", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/db/file?folder=default&file=something", Code: 404},
        {URL: "/rest/db/ignores?folder=default", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/db/need?folder=default", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/db/status?folder=default", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/db/browse?folder=default", Code: 200, Type: "application/json", Prefix: "null"},

        // /rest/stats
        {URL: "/rest/stats/device", Code: 200, Type: "application/json", Prefix: "null"},
        {URL: "/rest/stats/folder", Code: 200, Type: "application/json", Prefix: "null"},

        // /rest/svc
        {URL: "/rest/svc/deviceid?id=" + protocol.LocalDeviceID.String(), Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/svc/lang", Code: 200, Type: "application/json", Prefix: "["},
        {URL: "/rest/svc/report", Code: 200, Type: "application/json", Prefix: "{", Timeout: 5 * time.Second},

        // /rest/system
        {URL: "/rest/system/browse?current=~", Code: 200, Type: "application/json", Prefix: "["},
        {URL: "/rest/system/config", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/system/config/insync", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/system/connections", Code: 200, Type: "application/json", Prefix: "null"},
        {URL: "/rest/system/discovery", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/system/error?since=0", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/system/ping", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/system/status", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/system/version", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/system/debug", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/system/log?since=0", Code: 200, Type: "application/json", Prefix: "{"},
        {URL: "/rest/system/log.txt?since=0", Code: 200, Type: "text/plain", Prefix: ""},
    }

    for _, tc := range cases {
        t.Log("Testing", tc.URL, "...")
        testHTTPRequest(t, baseURL, tc)
    }
}
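The httpTestCase type and testHTTPRequest helper driven by the table above are not shown in these excerpts. A sketch consistent with how the table uses them; the field set matches the literals in the test, but the checks in the body are an assumed implementation:

import (
    "bytes"
    "io/ioutil"
    "net/http"
    "strings"
    "testing"
    "time"
)

// Assumed shape, derived from the composite literals in the test above.
type httpTestCase struct {
    URL     string        // URL to fetch, relative to baseURL
    Code    int           // expected HTTP status code
    Type    string        // expected Content-Type prefix
    Prefix  string        // expected response body prefix
    Timeout time.Duration // request timeout; zero means a default
}

func testHTTPRequest(t *testing.T, baseURL string, tc httpTestCase) {
    timeout := time.Second
    if tc.Timeout > 0 {
        timeout = tc.Timeout
    }
    cli := &http.Client{Timeout: timeout}

    resp, err := cli.Get(baseURL + tc.URL)
    if err != nil {
        t.Errorf("Unexpected error requesting %s: %v", tc.URL, err)
        return
    }
    defer resp.Body.Close()

    if resp.StatusCode != tc.Code {
        t.Errorf("Get on %s should have returned status %d, not %s", tc.URL, tc.Code, resp.Status)
        return
    }

    if ct := resp.Header.Get("Content-Type"); !strings.HasPrefix(ct, tc.Type) {
        t.Errorf("Content-Type on %s should start with %q, not %q", tc.URL, tc.Type, ct)
        return
    }

    data, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        t.Errorf("Unexpected error reading %s: %v", tc.URL, err)
        return
    }
    if !bytes.HasPrefix(data, []byte(tc.Prefix)) {
        t.Errorf("Body of %s should start with %q, not %q", tc.URL, tc.Prefix, string(data))
    }
}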
// Serve starts running the SIFT server. Most of the time you'll want to call
// it in a goroutine, or run it as a suture Service (see github.com/thejerf/suture).
func (s *Server) Serve() {
    s.stopOnExitSignal() // capture ^C and SIGTERM, and close gracefully (see: http://stackoverflow.com/a/18158859/3088592)
    supervisor := suture.NewSimple("sift server")
    supervisor.Add(s.ipv4Scan)
    supervisor.ServeBackground() // already runs the supervisor in its own goroutine

    // Listen for updates from adapters and consider them.
    var wg1 sync.WaitGroup
    for i := 0; i < numAdapterUpdateListeners; i++ {
        wg1.Add(1)
        go func() {
            for update := range s.updatesFromAdapters {
                // Consider the update through the prioritizer
                if err := s.prioritizer.Consider(update.AdapterDescription, update.update); err != nil {
                    s.log.Error("error while prioritizing update", "err", err)
                }
            }
            wg1.Done()
        }()
    }

    // Listen for updates from the prioritizer channel. These are updates
    // which should be reflected in the sift data model.
    var wg2 sync.WaitGroup
    for i := 0; i < numConfirmedUpdateListeners; i++ {
        wg2.Add(1)
        go func() {
            for update := range s.prioritizer.OutputChan() {
                s.handleUpdate(update)
            }
            wg2.Done()
        }()
    }

    // Wait for less-frequent signals
    for {
        select {
        case <-s.stop:
            // stop gracefully
            s.log.Debug("sift server stopping due to stop signal")
            // stopTimer := time.NewTimer(30 * time.Second)
            // workersStopped := make(chan struct{})
            // go func() {
            //     close(s.updatesFromAdapters) // will cause adapter update listeners to stop
            //     wg1.Wait()
            //     // TODO: CLOSE PRIORITIZER
            //     wg2.Wait()
            //     workersStopped <- struct{}{}
            // }()
            // select {
            // case <-stopTimer.C:
            //     s.log.Warn("timed out waiting for workers to stop, closing instead")
            // case <-workersStopped:
            // }
            if err := s.SiftDB.Close(); err != nil {
                s.log.Crit("could not gracefully close sift database", "err", err)
            }
            s.stopped <- struct{}{}
            return
        case ipv4Service := <-s.ipv4Scan.FoundServices():
            // new IPv4 service found
            go s.tryHandlingIPv4Service(ipv4Service)
        }
    }
}
func NewCachingMux() *CachingMux {
    return &CachingMux{
        Supervisor: suture.NewSimple("discover.cachingMux"),
        mut:        sync.NewMutex(),
    }
}
func main() {
    const (
        cleanIntv = 1 * time.Hour
        statsIntv = 5 * time.Minute
    )

    var listen string

    log.SetOutput(os.Stdout)
    log.SetFlags(0)

    flag.StringVar(&listen, "listen", ":8443", "Listen address")
    flag.IntVar(&lruSize, "limit-cache", lruSize, "Limiter cache entries")
    flag.IntVar(&limitAvg, "limit-avg", limitAvg, "Allowed average packet rate, per 10 s")
    flag.IntVar(&limitBurst, "limit-burst", limitBurst, "Allowed burst size, packets")
    flag.StringVar(&statsFile, "stats-file", statsFile, "File to write periodic operation stats to")
    flag.StringVar(&backend, "db-backend", backend, "Database backend to use")
    flag.StringVar(&dsn, "db-dsn", dsn, "Database DSN")
    flag.StringVar(&certFile, "cert", certFile, "Certificate file")
    flag.StringVar(&keyFile, "key", keyFile, "Key file")
    flag.BoolVar(&debug, "debug", debug, "Debug")
    flag.BoolVar(&useHTTP, "http", useHTTP, "Listen on HTTP (behind an HTTPS proxy)")
    flag.Parse()

    log.Println(LongVersion)

    var cert tls.Certificate
    var err error
    if !useHTTP {
        cert, err = tls.LoadX509KeyPair(certFile, keyFile)
        if err != nil {
            log.Println("Failed to load keypair. Generating one, this might take a while...")
            cert, err = tlsutil.NewCertificate(certFile, keyFile, "stdiscosrv", 3072)
            if err != nil {
                log.Fatalln("Failed to generate X509 key pair:", err)
            }
        }

        devID := protocol.NewDeviceID(cert.Certificate[0])
        log.Println("Server device ID is", devID)
    }

    db, err := sql.Open(backend, dsn)
    if err != nil {
        log.Fatalln("sql.Open:", err)
    }
    prep, err := setup(backend, db)
    if err != nil {
        log.Fatalln("Setup:", err)
    }

    main := suture.NewSimple("main")
    main.Add(&querysrv{
        addr: listen,
        cert: cert,
        db:   db,
        prep: prep,
    })
    main.Add(&cleansrv{
        intv: cleanIntv,
        db:   db,
        prep: prep,
    })
    main.Add(&statssrv{
        intv: statsIntv,
        file: statsFile,
        db:   db,
    })

    globalStats.Reset()
    main.Serve()
}
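A closing note on the supervision semantics shared by all three discosrv variants above: main.Serve() runs the supervision loop on the calling goroutine, so the process lives until the supervisor is stopped, and suture's documented contract is that a supervised service which returns from Serve or panics is logged and restarted (with backoff) rather than taking the process down. That is why querysrv, cleansrv, and statssrv need no ad hoc restart logic of their own.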