func (d *DB) RecentSuggestionSessions(n uint64) ([]tokenData, error) { psql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar) q := psql. Select("token", "created_at", "data::json->'URL'"). From("samples"). Where(squirrel.Eq{"type": "NewClientToken"}). OrderBy("ID desc"). Limit(n) rows, err := q.RunWith(d.cache).Query() if err != nil { lg.Fatal(err) } defer rows.Close() var tokens []tokenData for rows.Next() { var token tokenData var ID string err := rows.Scan(&ID, &token.CreatedAt, &token.URL) if err != nil { lg.Fatal(err) } token.ID = shared.SuggestionToken(ID) tokens = append(tokens, token) } return tokens, nil }
// Init initializes the server.
func Init() error {
	lg.SetSrcHighlight("alkasir/cmd", "alkasir/pkg")
	lg.CopyStandardLogTo("INFO")
	lg.V(1).Info("Log v-level:", lg.Verbosity())
	lg.V(1).Info("Active country codes:", shared.CountryCodes)
	lg.Flush()
	// Default the data directory to ~/.alkasir-central unless a -datadir
	// flag value was supplied.
	if *datadirFlag == "" {
		u, err := user.Current()
		if err != nil {
			lg.Fatal(err)
		}
		datadir = filepath.Join(u.HomeDir, ".alkasir-central")
	} else {
		datadir = *datadirFlag
	}
	// Build the valid country code set under its mutex.
	validCountryCodes = make(map[string]bool, len(shared.CountryCodes))
	validCountryCodesMu.Lock()
	for _, cc := range shared.CountryCodes {
		validCountryCodes[cc] = true
	}
	validCountryCodesMu.Unlock()
	err := InitDB()
	if err != nil {
		// NOTE(review): if lg.Fatalln exits the process (glog convention),
		// the return below is unreachable — confirm which is intended.
		lg.Fatalln(err)
		return err
	}
	redisPool = newRedisPool(*redisServer, *redisPassword)
	internet.SetDataDir(filepath.Join(datadir, "internet"))
	// The country database is mandatory (Fatalf); the city database is
	// optional (Warningf).
	countryFile := filepath.Join(datadir, "internet", "GeoLite2-Country.mmdb")
	if _, err := os.Stat(countryFile); os.IsNotExist(err) {
		// http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.mmdb.gz
		lg.Fatalf("cannot enable IP2CountryCode lookups, %s is missing", countryFile)
	} else {
		var err error
		mmCountryDB, err = maxminddb.Open(countryFile)
		if err != nil {
			lg.Fatal(err)
		}
	}
	cityFile := filepath.Join(datadir, "internet", "GeoLite2-City.mmdb")
	if _, err := os.Stat(cityFile); os.IsNotExist(err) {
		// http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz
		lg.Warningf("cannot enable IP2CityGeoNameID lookups, %s is missing", cityFile)
	} else {
		mmCityDB, err = maxminddb.Open(cityFile)
		if err != nil {
			lg.Fatal(err)
		}
		// defer mmCityDB.Close()
	}
	return nil
}
// RunPatchesCreate fans out patch creation: one producer goroutine per
// build query emits CreatePatchJobs onto jobC, nWorkers consumer
// goroutines turn jobs into CreatePatchResults on resC, and the results
// are collected and returned. The temporary old/new binaries referenced
// by each collected result are removed before returning.
func RunPatchesCreate(queries []nexus.BuildQuery, privateKey string, publicKey string, nWorkers int) ([]CreatePatchResult, error) {
	if nWorkers < 1 {
		nWorkers = 1
	}
	jobC := make(chan CreatePatchJob, 6)
	var creators sync.WaitGroup
	for _, v := range queries {
		creators.Add(1)
		go func(b nexus.BuildQuery) {
			defer creators.Done()
			err := createJobs(b, jobC, privateKey, publicKey)
			if err != nil {
				// NOTE(review): lg.Fatal aborts the whole process on one
				// failed query even though the function returns an error
				// — confirm whether that hard stop is intended.
				lg.Fatal(err)
			}
		}(v)
	}
	resC := make(chan CreatePatchResult, 0)
	var differs sync.WaitGroup
	for workerN := 0; workerN < nWorkers; workerN++ {
		differs.Add(1)
		go func() {
			defer differs.Done()
			for job := range jobC {
				res, err := CreatePatch(job)
				if err != nil {
					lg.Fatal(err)
				}
				resC <- res
			}
		}()
	}
	// Close jobC once all producers finish, then resC once all workers
	// drain it, so the collection loop below terminates.
	go func() {
		creators.Wait()
		close(jobC)
		differs.Wait()
		close(resC)
	}()
	var patches []CreatePatchResult
	// Remove the temporary binaries for every collected patch on return.
	defer func() {
		for _, p := range patches {
			os.Remove(p.job.NewBinary)
			os.Remove(p.job.OldBinary)
		}
	}()
	for pr := range resC {
		patches = append(patches, pr)
	}
	return patches, nil
}
func (d *DB) GetRelatedHosts() (map[string][]string, error) { result := make(map[string][]string, 0) psql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar) s := psql.Select("host", "related").From("hosts_related") rows, err := s.RunWith(d.cache).Query() if err != nil { logSQLErr(err, &s) return nil, err } defer rows.Close() for rows.Next() { var host, related string err := rows.Scan(&host, &related) if err != nil { lg.Fatal(err) } if relateds, ok := result[host]; ok { relateds = append(relateds, related) result[host] = relateds } else { relateds := make([]string, 1) relateds = append(relateds, related) result[host] = relateds } } return result, nil }
// main is the alkasir-admin entry point: it configures logging, parses
// flags (with ALKASIR_-prefixed environment variable fallbacks) and
// dispatches to the requested subcommand.
func main() {
	mrand.Seed(time.Now().UnixNano())
	// flag.Set returns an error; collect them and panic on any failure.
	errors := []error{
		flag.Set("logtostderr", "true"),
		flag.Set("logcolor", "true"),
	}
	for _, err := range errors {
		if err != nil {
			panic(err)
		}
	}
	lg.SetSrcHighlight("alkasir/cmd", "alkasir/pkg")
	lg.CopyStandardLogTo("info")
	flag.Parse()
	flagenv.Prefix = "ALKASIR_"
	flagenv.Parse()
	err := commandHandler(flag.Args())
	if err != nil {
		// Unknown command: print the command index and exit non-zero.
		if err == errCommandNotFound {
			fmt.Println("")
			fmt.Println("Command index:")
			fmt.Println("")
			rootCommand.PrintHelp("alkasir-admin", 0)
			fmt.Println("")
			os.Exit(1)
		}
		lg.Fatal(err)
		// NOTE(review): if lg.Fatal exits (glog convention) this os.Exit
		// is unreachable — confirm and drop one of the two.
		os.Exit(1)
	}
}
func GenerateKeys(random io.Reader) (*[64]byte, *[32]byte) { pub, priv, err := ed25519.GenerateKey(random) if err != nil { lg.Fatal(err) } return priv, pub }
func startMonitoring(addr string) { var redisActiveConn = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "redis_active_conn", Help: "Number of active redis connections.", }) prometheus.MustRegister(redisActiveConn) var redisMaxConn = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "redis_max_conn", Help: "Maximum number of redis connections.", }) prometheus.MustRegister(redisMaxConn) http.Handle("/metrics", prometheus.Handler()) redisMaxConn.Set(float64(redisPool.MaxActive)) go func() { tick := time.NewTicker(1 * time.Second) for range tick.C { if redisPool == nil { redisActiveConn.Set(0) } else { redisActiveConn.Set(float64(redisPool.ActiveCount())) } } }() err := http.ListenAndServe(addr, nil) if err != nil { lg.Fatal(err) } }
func debugPprof(args []string) error { var dir string if len(args) == 0 { dir = debugLatestImport() } else { dir = args[0] } const profile = "heap" var header debugexport.DebugHeader data, err := ioutil.ReadFile(filepath.Join(dir, "header.json")) if err != nil { lg.Fatal(err) } err = json.Unmarshal(data, &header) if err != nil { lg.Fatal(err) } lg.Infof("%+v", header) q := nexus.BuildQuery{ OS: header.OS, Arch: header.Arch, Version: header.Version, Cmd: "alkasir-gui", } cmdlocation, err := q.GetMatchingBuildBinary() if err != nil { lg.Fatal(err) } var cmdargs []string cmdargs = append(cmdargs, "tool", "pprof", cmdlocation, filepath.Join(dir, profile+".txt")) cmd := exec.Command("go", cmdargs...) cmd.Stdout = os.Stdout cmd.Stdin = os.Stdin cmd.Stderr = os.Stderr return cmd.Run() }
// Read reads all blood-pressure entries stored in the given memory bank
// of the Omron device through the cgo bindings, printing "." for empty
// slots and "*" for parsed entries as progress. Timestamps are parsed in
// the local time zone.
func Read(bank int) ([]omron.Entry, error) {
	counted := C.m_count(C.int(bank))
	var entries []omron.Entry
	// NOTE(review): the loop visits counted+1 slots — confirm whether the
	// device's slot range is inclusive or this over-reads by one.
	for i := 0; i < int(counted)+1; i++ {
		ret := C.m_read(C.int(bank), C.int(i))
		str := C.GoString(ret)
		// The C side allocates the returned buffer; free it once copied
		// into a Go string.
		C.free(unsafe.Pointer(ret))
		if str == "" {
			fmt.Print(".")
		} else {
			fmt.Print("*")
			// Record format: "timestamp,sys,dia,pulse".
			fields := strings.Split(str, ",")
			t, err := time.ParseInLocation("2006-01-02 15:04:05", fields[0], time.Local)
			if err != nil {
				lg.Fatal(err)
			}
			sys, err := strconv.Atoi(fields[1])
			if err != nil {
				lg.Fatal(err)
			}
			dia, err := strconv.Atoi(fields[2])
			if err != nil {
				lg.Fatal(err)
			}
			pulse, err := strconv.Atoi(fields[3])
			if err != nil {
				lg.Fatal(err)
			}
			entry := omron.Entry{
				Time:  t,
				Sys:   sys,
				Dia:   dia,
				Pulse: pulse,
				Bank:  bank,
			}
			entries = append(entries, entry)
		}
	}
	fmt.Print("\n")
	return entries, nil
}
// Quickrunner for the latest archived release func QuickReleaseRunner(cmd string) { q := BuildQuery{ OS: runtime.GOOS, Arch: runtime.GOARCH, Cmd: cmd, } artifacts, err := q.GetVersions() latest := artifacts.Latest() q.GetBinary(latest) bin, err := q.cmdGlob() if err != nil { lg.Fatal(err) } err = latest.Run(bin) if err != nil { lg.Fatal(err) } }
func testPatch(pr CreatePatchResult, publicKey string) error { lg.Infof("verifying %s %s>%s", pr.Artifact, pr.OldVersion, pr.NewVersion) tmpfile := fmt.Sprintf("/tmp/%s-%s-o", pr.Artifact, pr.OldVersion) err := cp(tmpfile, pr.job.OldBinary) if err != nil { lg.Fatal(err) } defer func() { err = os.Remove(tmpfile) if err != nil { lg.Errorln(err) } }() sum, err := base64.RawURLEncoding.DecodeString(pr.SHA256Sum) if err != nil { return err } sig, err := upgradebin.DecodeSignature(pr.ED25519Signature) if err != nil { return err } pub, err := upgradebin.DecodePublicKey([]byte(publicKey)) if err != nil { return err } opts := update.Options{ Patcher: update.NewBSDiffPatcher(), Verifier: upgradebin.NewED25519Verifier(), Hash: crypto.SHA256, Checksum: sum, Signature: sig[:], PublicKey: pub, TargetPath: tmpfile, } diffFile, err := os.Open(pr.DiffFile) if err != nil { return err } defer diffFile.Close() err = update.Apply(diffFile, opts) if err != nil { return err } return nil }
// startAnalysis launches a background loop that polls the samples table
// every 10 seconds starting from the last processed sample ID, publishes
// hosts for accepted NewClientToken samples, and persists the highest
// processed ID so work resumes from there after a restart.
func startAnalysis(clients db.Clients) {
	go func() {
		tick := time.NewTicker(10 * time.Second)
		lastID, err := clients.DB.GetLastProcessedSampleID()
		if err != nil {
			lg.Warningln(err)
		}
		lg.Infof("starting analysis from sample ID %d", lastID)
		lastPersistedID := lastID
		for range tick.C {
			results, err := clients.DB.GetSamples(uint64(lastID), "")
			if err != nil {
				lg.Fatal(err)
			}
			n := 0
			start := time.Now()
		loop:
			for s := range results {
				n++
				// Track the highest sample ID seen this round.
				if s.ID > lastID {
					lastID = s.ID
				}
				if s.Type == "NewClientToken" {
					if !shared.AcceptedHost(s.Host) {
						lg.Warningln("not accepted host id:", s.ID, s.Host)
						continue loop
					}
					err := clients.DB.PublishHost(s)
					if err != nil {
						lg.Warning(err)
					}
				}
			}
			if n != 0 && lg.V(15) {
				lg.Infof("processed %d samples in %s", n, time.Since(start).String())
			}
			// Only write the checkpoint back when it actually advanced.
			if lastID != lastPersistedID {
				err = clients.DB.SetLastProcessedSampleID(lastID)
				if err != nil {
					lg.Errorln(err)
				} else {
					lastPersistedID = lastID
				}
			}
		}
	}()
}
func (r *relatedHosts) update() { lg.V(19).Infoln("updating related hosts..") related, err := r.dbclients.DB.GetRelatedHosts() if err != nil { lg.Fatal(err) } curated := make(map[string][]string, len(related)) for k, v := range related { curated[strings.TrimPrefix(k, "www.")] = v } r.Lock() r.items = curated r.Unlock() }
// main opens the bolt-backed bpchart database and runs the HTTP server
// and the device import concurrently, waiting on both before exiting.
func main() {
	runtime.LockOSThread()
	flag.Set("logtostderr", "true")
	lg.CopyStandardLogTo("INFO")
	lg.SetSrcHighlight("libomron")
	flag.Parse()
	bdb, err := bolt.Open("bpchart.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err != nil {
		lg.Fatal(err)
	}
	db := &db.DB{DB: bdb, BucketName: []byte(entriesBucketName)}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		// NOTE(review): if lg.Fatal exits (glog convention), wg.Done below
		// is unreachable and the process only ends via the Fatal — confirm
		// this is the intended shutdown path.
		lg.Fatal(httpServer(db))
		wg.Done()
	}()
	wg.Add(1)
	go func() {
		// FAKE selects a simulated import for development without hardware.
		if FAKE {
			err = fakeImportFromDevice(db)
		} else {
			err = importFromDevice(db)
		}
		if err != nil {
			lg.Errorln(err)
		}
		wg.Done()
	}()
	wg.Wait()
}
// Wait blocks until the underlying process is stopped func (s *Service) wait() { if s.isCopy { lg.Fatal("wait called on copy of service!") } if s.cmd != nil { lg.V(10).Infof("Waiting for process %s to exit", s.ID) err := s.cmd.Wait() if err != nil { lg.Warningln(err) } lg.V(10).Infof("%s exited", s.ID) } s.waiter.Wait() }
func debugLatestImport() string { files, err := ioutil.ReadDir("alkasir-debug-reports") if err != nil { lg.Fatal(err) } var dirs []string for _, v := range files { if v.IsDir() { dirs = append(dirs, v.Name()) } } sort.Strings(dirs) return filepath.Join("alkasir-debug-reports", dirs[len(dirs)-1]) }
// Create a new service instance. // note: name is not yet a decided requirement. func NewService(name string) (s *Service) { s = &Service{ ID: serviceIdGen.New(), Name: name, Request: make(map[string]string), Response: make(map[string]string), Methods: &Methods{ list: make([]*Method, 0), }, } err := ManagedServices.add(s) if err != nil { lg.Fatal(err) } return }
// GetBinary downloads artifact, extracts archive and returns the path to the // extracted executable. If the file already exits it is not downloaded. func (q *BuildQuery) GetBinary(artifact *Artifact) (string, error) { cmdGlob, err := q.cmdGlob() if err != nil { return "", err } if _, err := os.Stat(artifact.Path()); os.IsNotExist(err) { err = artifact.Download() if err != nil { lg.Fatal(err) } } gp, err := artifact.GlobPath(cmdGlob) if err != nil { return "", err } if len(gp) > 0 { return gp[0], nil } err = artifact.Extract() if err != nil { // retry download if extraction fails err = artifact.Download() if err != nil { return "", err } err := artifact.Extract() if err != nil { return "", err } } gp, err = artifact.GlobPath(cmdGlob) if err != nil { return "", err } if len(gp) < 1 { lg.Fatalf("no glob match for '%s' in %s %s", cmdGlob, artifact.Version, q.ArtifactDisplayName()) } return gp[0], nil }
func main() { var bindaddr = flag.String("bindaddr", "0.0.0.0:7245", "bind address") rand.Seed(time.Now().UnixNano()) flag.Parse() flagenv.Prefix = "ALKASIR_WANIP_SERVER_" flagenv.Parse() lg.CopyStandardLogTo("INFO") http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { ipAddress, _, _ := net.SplitHostPort(r.RemoteAddr) fmt.Fprintf(w, "%s", ipAddress) if lg.V(5) { lg.Infof("returning %s", ipAddress) } }) lg.Infof("Listening to http://%s", *bindaddr) err := http.ListenAndServe(*bindaddr, nil) if err != nil { lg.Fatal(err) } }
// SetConfig reads configuration from json byte stream func parseConfig(config []byte) (*Settings, error) { s := &Settings{} err := json.Unmarshal(config, &s) if err != nil { return nil, err } for i, c := range s.Connections { err := c.EnsureID() if err != nil { lg.Fatal(err) } lg.V(15).Infof("connection id: %s", c.ID) if lg.V(50) { v, _ := c.Encode() lg.Infof("connection encoded: %s", v) lg.Infof("connection full: %+v", c) } s.Connections[i] = c } return s, nil }
func startMonitoring(addr string) { expvar.Publish("Goroutines", expvar.Func(goroutines)) expvar.Publish("Uptime", expvar.Func(uptime)) redisActiveConn := expvar.NewInt("redis_pool_conn_active") redisMaxConn := expvar.NewInt("redis_pool_conn_max") redisMaxConn.Set(int64(redisPool.MaxActive)) go func() { tick := time.NewTicker(time.Duration(1 * time.Second)) for range tick.C { if redisPool == nil { redisActiveConn.Set(0) } else { redisActiveConn.Set(int64(redisPool.ActiveCount())) } } }() err := http.ListenAndServe(addr, nil) if err != nil { lg.Fatal(err) } }
func (d *DB) GetBlockedHosts(CountryCode string, ASN int) ([]string, error) { psql := squirrel.StatementBuilder.PlaceholderFormat(squirrel.Dollar) s := psql.Select("host").From("hosts_publish").Where(squirrel.Eq{ "country_code": CountryCode, "asn": ASN, }) rows, err := s.RunWith(d.cache).Query() if err != nil { logSQLErr(err, &s) return nil, err } defer rows.Close() var hosts []string for rows.Next() { var host string err := rows.Scan(&host) if err != nil { lg.Fatal(err) } hosts = append(hosts, host) } return hosts, nil }
// initService wires up the command's stdio pipes, starts the process, and
// consumes its startup protocol lines from stdout until a done marker, an
// expose registration, or an error line is seen.
func (s *Service) initService() error {
	cmd := s.cmd
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	s.stdout = stdout
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}
	s.stderr = stderr
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return err
	}
	s.stdin = stdin
	if lg.V(5) {
		// Log only the ALKASIR_-prefixed environment entries.
		alkasirEnv := ""
		for _, v := range s.cmd.Env {
			if strings.HasPrefix(v, "ALKASIR_") {
				alkasirEnv += v + " "
			}
		}
		lg.Infof("Starting service: %s %s", alkasirEnv, cmd.Path)
	}
	err = cmd.Start()
	if err != nil {
		return err
	}
	// Parse the child's startup lines until done or error.
	scanner := bufio.NewScanner(stdout)
	var transportErrMsg string
	transportErr := false
	var line string
	for scanner.Scan() {
		line = scanner.Text()
		lg.V(5).Infoln("DBG: ", line)
		if errorM.MatchString(line) {
			transportErr = true
			transportErrMsg = line
			return errors.New("error: " + transportErrMsg)
		} else if doneM.MatchString(line) {
			break
		} else if exposeM.MatchString(line) {
			// Record the advertised protocol/bind address and register
			// the exposed method.
			match := exposeM.FindStringSubmatch(line)
			s.Response["bindaddr"] = match[3]
			s.Response["protocol"] = match[2]
			s.registerMethod(match[1], match[2], match[3])
		} else if versionM.MatchString(line) {
			// Version lines are recognized but intentionally ignored.
		} else if parentM.MatchString(line) {
			match := parentM.FindStringSubmatch(line)
			s.Response["parentaddr"] = match[3]
		} else {
			lg.Infoln("not handled line:", line)
			return errors.New("unhandeled line")
		}
	}
	// NOTE(review): transportErr can never be true here — the error branch
	// above returns immediately — so this block looks unreachable; confirm.
	if transportErr {
		err := cmd.Wait()
		if err != nil {
			lg.Warningln(err)
		}
		lg.Fatal(transportErrMsg)
		return errors.New("transport err")
	}
	return err
}
// Init does precondition check if the application can/should be started.
// Init will return an error message with reason for exit printed.
func Run() {
	if debugEnabled {
		log.Println("ALKASIR_DEBUG ENABLED!")
		err := os.Setenv("ALKASIR_DEBUG", "1")
		if err != nil {
			log.Fatal(err)
		}
	}
	if hotEnabled {
		log.Println("ALKASIR_HOT ENABLED!")
		err := os.Setenv("ALKASIR_HOT", "1")
		if err != nil {
			log.Fatal(err)
		}
	}
	// the darwin systray does not exit the main loop
	if runtime.GOOS != "darwin" {
		uiRunning.Add(1)
	}
	err := ui.Run(func() {
		Atexit(ui.Done)
		// start the getpublic ip updater.
		go func() {
			_ = shared.GetPublicIPAddr()
		}()
		if debugEnabled {
			// Serve the default mux locally for debugging endpoints.
			go func() {
				err := http.ListenAndServe(
					fmt.Sprintf("localhost:%d", debugPort), nil)
				if err != nil {
					panic(err)
				}
			}()
		}
		// wipe user data
		if wipeData {
			settingsdir := clientconfig.ConfigPath()
			if settingsdir == "" {
				log.Println("[wipe] Configdir not set")
				os.Exit(1)
			}
			// Only wipe when settings.json exists, as a safety check that
			// this really is an alkasir config directory.
			settingsfile := clientconfig.ConfigPath("settings.json")
			if _, err := os.Stat(settingsfile); os.IsNotExist(err) {
				log.Println("[wipe] No settings.json in configdir, will NOT wipe data")
				os.Exit(1)
			}
			log.Println("Wiping all user data")
			if err := os.RemoveAll(settingsdir); err != nil {
				log.Println(err)
			}
		}
		// Prepare logging
		logdir := clientconfig.ConfigPath("log")
		err := os.MkdirAll(logdir, 0775)
		if err != nil {
			log.Println("Could not create logging directory")
			os.Exit(1)
		}
		err = flag.Set("log_dir", logdir)
		if err != nil {
			panic(err)
		}
		lg.SetSrcHighlight("alkasir/cmd", "alkasir/pkg")
		lg.CopyStandardLogTo("INFO")
		// Start init
		if VERSION != "" {
			lg.Infoln("Alkasir v" + VERSION)
		} else {
			lg.Warningln("Alkasir dev version (VERSION not set)")
		}
		lg.V(1).Info("Log v-level:", lg.Verbosity())
		_, err = clientconfig.Read()
		if err != nil {
			lg.Infoln("Could not read config")
			exit()
		}
		lg.V(30).Infoln("settings", clientconfig.Get().Settings)
		if saveChromeExt {
			err := saveChromeExtension()
			if err != nil {
				lg.Fatal(err)
			}
		}
		{
			configChanged, err := clientconfig.UpgradeConfig()
			if err != nil {
				lg.Fatalln("Could not upgrade config", err)
			}
			// Command-line flag values override persisted settings.
			clientconfig.Update(func(conf *clientconfig.Config) error {
				if clientAuthKeyFlag != "" {
					lg.Warningln("Overriding generated authKey with", clientAuthKeyFlag)
					conf.Settings.Local.ClientAuthKey = clientAuthKeyFlag
					configChanged = true
				}
				if bindAddrFlag != "" {
					lg.Warningln("Overriding configured bindAddr with", bindAddrFlag)
					conf.Settings.Local.ClientBindAddr = bindAddrFlag
					configChanged = true
				}
				if centralAddrFlag != "" {
					lg.Warningln("Overriding central server addr with", centralAddrFlag)
					conf.Settings.Local.CentralAddr = centralAddrFlag
					configChanged = true
				}
				return nil
			})
			if configChanged {
				if err := clientconfig.Write(); err != nil {
					lg.Warning(err)
				}
			}
		}
		conf := clientconfig.Get()
		loadTranslations(LanguageOptions...)
		if err := ui.Language(conf.Settings.Local.Language); err != nil {
			lg.Warningln(err)
		}
		// Exit on SIGINT or on a quit request from the UI.
		go func() {
			select {
			case <-sigIntC:
				exit()
			case <-ui.Actions.Quit:
				exit()
			}
		}()
		// Register mime types; failures are non-fatal.
		for _, e := range []error{
			mime.AddExtensionType(".json", "application/json"),
			mime.AddExtensionType(".js", "application/javascript"),
			mime.AddExtensionType(".css", "text/css"),
			mime.AddExtensionType(".md", "text/plain"),
		} {
			if e != nil {
				lg.Warning(e)
			}
		}
		err = startInternalHTTPServer(conf.Settings.Local.ClientAuthKey)
		if err != nil {
			lg.Fatal("could not start internal http services")
		}
		// Connect the default transport
		service.UpdateConnections(conf.Settings.Connections)
		service.UpdateTransports(conf.Settings.Transports)
		go service.StartConnectionManager(conf.Settings.Local.ClientAuthKey)
		// TODO: async
		pac.UpdateDirectList(conf.DirectHosts.Hosts)
		pac.UpdateBlockedList(conf.BlockedHostsCentral.Hosts, conf.BlockedHosts.Hosts)
		lastBlocklistChange = time.Now()
		go StartBlocklistUpgrader()
		if upgradeDiffsBaseURL != "" {
			lg.V(19).Infoln("upgradeDiffsBaseURL is ", upgradeDiffsBaseURL)
			go StartBinaryUpgradeChecker(upgradeDiffsBaseURL)
		} else {
			lg.Warningln("empty upgradeDiffsBaseURL, disabling upgrade checks")
		}
		lg.V(5).Info("Alkasir has started")
	})
	// the darwin systray does not exit the main loop
	if runtime.GOOS != "darwin" {
		uiRunning.Done()
	}
	lg.Infoln("ui.Run ended")
	if err != nil {
		log.Println("client.Run error:", err)
	}
}
func createJobs(q nexus.BuildQuery, jobC chan CreatePatchJob, privateKey string, publicKey string) error { versions, err := q.GetVersions() if err != nil { lg.Fatal(err) } sort.Sort(versions) sort.Sort(sort.Reverse(versions)) if len(versions) < 2 { return errors.New("too few versions") } latestVersion := versions[0] lg.V(20).Infoln("latest version", latestVersion) { if len(versions) > PatchHistoryAmountMax+1 { versions = versions[1 : PatchHistoryAmountMax+1] } else { versions = versions[1:] } } lg.V(20).Infoln("old versions", versions) // TODO: reimplement this check so that upgrade processing can be resumed. // if _, err := os.Stat(jsonname); err == nil { // lg.Infof("%s exists, skipping processing", jsonname) // return nil // } latestBinPath, err := q.GetBinary(latestVersion) if err != nil { return err } lg.Infof("creating patchJobs for %s %s %s", latestVersion.ArtifactID, latestVersion.Classifier, latestVersion.Version, ) for _, v := range versions { bp, err := q.GetBinary(v) if err != nil { return err } j := CreatePatchJob{ Artifact: fmt.Sprintf("%s-%s", latestVersion.ArtifactID, latestVersion.Classifier), OldBinary: bp, NewBinary: latestBinPath, NewVersion: latestVersion.Version, OldVersion: v.Version, PrivateKey: privateKey, PublicKey: publicKey, } lg.V(10).Infof("sending created job %s", j.Artifact) jobC <- j lg.V(10).Infof("sent job %s", j.Artifact) } lg.Infof("all jobs created for %s", q.ArtifactDisplayName()) return nil }
func (m *modifyConnections) Update() []shared.Connection { lg.Infoln("updating connections..") if lg.V(19) { lg.Infof("pre upgrade state:") for _, v := range m.Connections { lg.Infoln(v) } } // create map for id lookups conns := make(map[string]shared.Connection, 0) for _, connection := range m.Connections { conns[connection.ID] = connection } // remove old old connections for _, ID := range m.Remove { if _, ok := conns[ID]; ok { lg.V(19).Infof("remove connection: %s", ID) delete(conns, ID) } } // add new connections for _, v := range m.Add { conn, err := shared.DecodeConnection(v) if err != nil { lg.Fatal(err) } ID := conn.ID if _, ok := conns[ID]; !ok { lg.V(19).Infof("add connection: %s", ID) conns[ID] = conn } } // protect connections for _, ID := range m.Protect { if _, ok := conns[ID]; ok { c := conns[ID] c.Protected = true conns[ID] = c lg.V(19).Infof("protected connection: %s", ID) } } var result []shared.Connection for _, v := range conns { result = append(result, v) } if lg.V(19) { lg.Infof("upgraded connections result:") for _, v := range result { lg.Infoln(v) } } return result }
// createUpgradeAuto creates binary upgrade patches for all configured
// build queries using the key pair loaded from the -privpem/-pubpem
// files, then bundles everything under diffs/ into a tar archive named
// after the latest version.
// NOTE(review): error handling mixes return/panic/lg.Fatal/log.Fatalln —
// consider unifying on returning errors.
func createUpgradeAuto(args []string) error {
	var (
		privPemFlag string
		pubPemFlag  string
	)
	fs := flag.NewFlagSet("upgrade create", flag.ExitOnError)
	fs.StringVar(&privPemFlag, "privpem", "upgrades-private-key.pem", "path to load private key file from")
	fs.StringVar(&pubPemFlag, "pubpem", "upgrades-public-key.pem", "path to load public key file from")
	fs.Parse(args)
	args = fs.Args()
	// A missing key file is reported but not treated as an error.
	privPem, err := ioutil.ReadFile(privPemFlag)
	if err != nil {
		if os.IsNotExist(err) {
			lg.Errorf("%s does not exist", privPemFlag)
			return nil
		}
		return err
	}
	pubPem, err := ioutil.ReadFile(pubPemFlag)
	if err != nil {
		if os.IsNotExist(err) {
			lg.Errorf("%s does not exist", pubPemFlag)
			return nil
		}
		return err
	}
	results, err := makepatch.RunPatchesCreate(
		jobQs, string(privPem), string(pubPem), nWorkersFlag)
	if err != nil {
		panic(err)
	}
	if len(results) < 1 {
		lg.Fatalln("no patch results returned")
	}
	// Collect a tar header for every regular file under diffs/.
	var allFiles []*tar.Header
	err = filepath.Walk("diffs", func(path string, f os.FileInfo, err error) error {
		if f.IsDir() {
			return nil
		}
		allFiles = append(allFiles, &tar.Header{
			Name: path,
			Mode: 0600,
			Size: f.Size(),
		})
		return nil
	})
	if err != nil {
		lg.Fatal(err)
	}
	latestVersion := results[0].NewVersion
	filename := fmt.Sprintf("alkasir-binpatches-for-%s.tar", latestVersion)
	tarfile, err := os.Create(filename)
	if err != nil {
		panic(err)
	}
	tw := tar.NewWriter(tarfile)
	// Write each collected file into the archive.
	for _, hdr := range allFiles {
		if err := tw.WriteHeader(hdr); err != nil {
			log.Fatalln(err)
		}
		s, err := os.Open(hdr.Name)
		if err != nil {
			return err
		}
		_, err = io.Copy(tw, s)
		if err != nil {
			lg.Fatal(err)
		}
		err = s.Close()
		if err != nil {
			lg.Fatal(err)
		}
	}
	if err := tw.Close(); err != nil {
		log.Fatalln(err)
	}
	lg.Infoln("done")
	return nil
}
func httpServer(db *db.DB) error { http.HandleFunc("/json/", func(w http.ResponseWriter, r *http.Request) { dtMin := time.Now().Add(-time.Hour * 24 * 7 * 4) dtMax := time.Now().Add(time.Minute) { type stp struct { t *time.Time qp string } for _, v := range []stp{ {&dtMin, "dt_min"}, {&dtMax, "dt_max"}, } { ts := r.URL.Query().Get(v.qp) t, err := time.Parse(time.RFC3339, ts) if err != nil { t, err = time.Parse("2006-01-02", ts) if err != nil { continue } } *v.t = t } } avgMinutes := 10 if r.URL.Query().Get("avg_minutes") != "" { var err error avgMinutes, err = strconv.Atoi(r.URL.Query().Get("avg_minutes")) if err != nil { lg.Fatalln(err) } } allEntries, err := db.All() var filteredEntires []omron.Entry for _, entry := range allEntries { if entry.Time.After(dtMin) && entry.Time.Before(dtMax) { filteredEntires = append(filteredEntires, entry) } } avgEntries := omron.AvgWithinDuration( filteredEntires, time.Duration(avgMinutes)*time.Minute) scoredEntries := score.All(avgEntries) w.Header().Set("Content-type", "application/json") data, err := json.MarshalIndent(scoredEntries, "", " ") if err != nil { lg.Fatal(err) } w.Write(data) }) http.Handle("/assets/", http.StripPrefix("/assets/", http.FileServer( http.Dir("build/assets")))) http.Handle("/", http.FileServer( http.Dir("browser/html"))) return http.ListenAndServe(":8080", nil) }
// Run runs the initialized server.
func Run() {
	var wg sync.WaitGroup
	// start monitor server
	go startMonitoring(*monitorBindAddr)
	// start the getpublic ip updater.
	go func() {
		_ = shared.GetPublicIPAddr()
	}()
	// Warm the in-memory session token cache from postgres.
	wg.Add(1)
	go func() {
		defer wg.Done()
		lg.V(2).Infoln("Loading recent sessions from postgres...")
		recents, err := sqlDB.RecentSuggestionSessions(20000)
		if err != nil {
			lg.Fatal(err)
		}
		db.SessionTokens.Reset(recents)
		lg.V(2).Infof("Loaded %d sessions from postgres...", len(recents))
		lg.Flush()
	}()
	// Refresh BGP dump data; failure is tolerated with -offline.
	wg.Add(1)
	go func() {
		defer wg.Done()
		conn := redisPool.Get()
		defer conn.Close()
		lg.V(2).Infoln("BGPDump refresh started...")
		n, err := internet.RefreshBGPDump(conn)
		lg.V(2).Infof("BGPDump refresh ended, %d items added.", n)
		lg.Flush()
		if err != nil {
			if *offline {
				lg.Infoln("offline", err)
			} else {
				lg.Fatal(err)
			}
		}
	}()
	// Refresh the CIDR report; same offline tolerance as above.
	wg.Add(1)
	go func() {
		defer wg.Done()
		conn := redisPool.Get()
		defer conn.Close()
		lg.V(2).Infoln("CIDRReport refresh started...")
		n, err := internet.RefreshCIDRReport(conn)
		lg.V(2).Infof("CIDRReport refresh ended, %d items added", n)
		if err != nil {
			if *offline {
				lg.Infoln("offline", err)
			} else {
				lg.Fatal(err)
			}
		}
	}()
	// Wait for the startup tasks above before serving.
	wg.Wait()
	// start signal handling
	wg.Add(1)
	go func() {
		ch := make(chan os.Signal)
		signal.Notify(ch, syscall.SIGINT)
		lg.Infoln(<-ch)
		wg.Done()
	}()
	internetClient := db.NewInternetClient(redisPool)
	maxmindClient := db.NewMaxmindClient(mmCountryDB, mmCityDB)
	clients := db.Clients{
		DB:       sqlDB,
		Internet: internetClient,
		Maxmind:  maxmindClient,
	}
	// start http json api server
	go func(addr string, dba db.Clients) {
		mux, err := apiMux(dba)
		// NOTE(review): err from apiMux is not checked before mux is used
		// — confirm whether it can fail and handle it if so.
		lg.Info("Starting http server", addr)
		err = http.ListenAndServe(addr, mux)
		if err != nil {
			lg.Fatal(err)
		}
	}(*apiBindAddr, clients)
	// start http export api server
	go func(addr string, dba db.Clients) {
		// Without a configured secret key, print a suggested key and skip
		// starting the export server.
		if *exportApiSecretKey == "" {
			lg.Warningln("exportApiSecretKey flag/env not set, will not start export api server")
			b := make([]byte, 32)
			_, err := rand.Read(b)
			if err != nil {
				lg.Fatalf("random generator not functioning...")
				return
			}
			suggestedkey := base64.StdEncoding.EncodeToString(b)
			lg.Infoln("suggested export key:", suggestedkey)
			return
		}
		key, err := base64.StdEncoding.DecodeString(*exportApiSecretKey)
		if err != nil {
			lg.Fatalf("could not decode export api secret key: %s", *exportApiSecretKey)
		}
		mux, err := apiMuxExport(dba, key)
		// NOTE(review): err from apiMuxExport is not checked before use.
		lg.Info("Starting export api server", addr)
		err = http.ListenAndServe(addr, mux)
		if err != nil {
			lg.Fatal(err)
		}
	}(*exportApiBindAddr, clients)
	go analysis.StartAnalysis(clients)
	startMeasurer(clients)
	// Block until the SIGINT handler calls wg.Done.
	wg.Wait()
}