func scoreStatusCode(client, central int) float64 {
	if central < 400 && client > 400 {
		lg.Infof("status ranges %d != %d", client, central)
		return 1.0
	} else if client != central {
		lg.Infof("status %d != %d", client, central)
		return 0.7
	} else if client == central {
		return 0.0
	}
	return 0.5
}
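// Illustrative examples (not part of the original source) of how the scorer
// rates client vs. central status codes:
//
//	scoreStatusCode(403, 200) // 1.0: central succeeded, client got an error-range status
//	scoreStatusCode(301, 302) // 0.7: codes differ, but not across the error boundary
//	scoreStatusCode(200, 200) // 0.0: identical status codes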
func startAnalysis(clients db.Clients) {
	go func() {
		tick := time.NewTicker(10 * time.Second)
		lastID, err := clients.DB.GetLastProcessedSampleID()
		if err != nil {
			lg.Warningln(err)
		}
		lg.Infof("starting analysis from sample ID %d", lastID)
		lastPersistedID := lastID
		for range tick.C {
			results, err := clients.DB.GetSamples(uint64(lastID), "")
			if err != nil {
				lg.Fatal(err)
			}
			n := 0
			start := time.Now()
		loop:
			for s := range results {
				n++
				if s.ID > lastID {
					lastID = s.ID
				}
				if s.Type == "NewClientToken" {
					if !shared.AcceptedHost(s.Host) {
						lg.Warningln("not accepted host id:", s.ID, s.Host)
						continue loop
					}
					err := clients.DB.PublishHost(s)
					if err != nil {
						lg.Warning(err)
					}
				}
			}
			if n != 0 && lg.V(15) {
				lg.Infof("processed %d samples in %s", n, time.Since(start).String())
			}
			if lastID != lastPersistedID {
				err = clients.DB.SetLastProcessedSampleID(lastID)
				if err != nil {
					lg.Errorln(err)
				} else {
					lastPersistedID = lastID
				}
			}
		}
	}()
}
func StartAnalysis(clients db.Clients) {
	tick := time.NewTicker(5 * time.Second)
	lastID, err := clients.DB.GetLastProcessedSampleID()
	if err != nil {
		lg.Warningln(err)
	}
	for n := 0; n < 4; n++ {
		go sessionFetcher(clients)
		go samplesAnalyzer()
		go hostPublisher(clients)
	}
	lg.Infof("starting analysis from sample ID %d", lastID)
	lastPersistedID := lastID
	for range tick.C {
		results, err := clients.DB.GetSamples(uint64(lastID), "")
		if err != nil {
			lg.Errorf("database err (skipping): %v", err)
			continue
		}
		n := 0
		start := time.Now()
		for s := range results {
			n++
			if s.ID > lastID {
				lastID = s.ID
			}
			if s.Origin == "Central" && s.Type == "HTTPHeader" {
				sessionFetchC <- s.Token
			}
		}
		if n != 0 && lg.V(15) {
			lg.Infof("processed %d samples in %s", n, time.Since(start).String())
		}
		if lastID != lastPersistedID {
			err = clients.DB.SetLastProcessedSampleID(lastID)
			if err != nil {
				lg.Errorln(err)
			} else {
				lastPersistedID = lastID
			}
		}
	}
}
func (b *BuildQuery) getVersion() (*Artifact, error) {
	lg.Infof("Getting versions of %s", b.ArtifactDisplayName())
	c, err := getNexusClient()
	if err != nil {
		return nil, err
	}
	artifacts, err := c.Artifacts(
		search.InRepository{
			RepositoryID: repoID,
			Criteria: search.ByCoordinates{
				GroupID:    "com.alkasir",
				Version:    b.Version,
				ArtifactID: b.Cmd,
				Classifier: b.Classifier(),
			},
		},
	)
	if err != nil {
		return nil, err
	}
	if len(artifacts) != 1 {
		lg.Infoln(artifacts)
		return nil, errors.New("one match expected")
	}
	return &Artifact{Artifact: artifacts[0]}, nil
}
func PostTransportTraffic(w rest.ResponseWriter, r *rest.Request) {
	form := shared.TransportTraffic{}
	err := r.DecodeJsonPayload(&form)
	if err != nil {
		apiutils.WriteRestError(w, apierrors.NewInternalError(err))
		return
	}
	transportTrafficMu.Lock()
	defer transportTrafficMu.Unlock()
	transportTraffic = form
	if lg.V(10) {
		if len(transportTrafficLog) == 6 {
			lg.Infof("transport traffic: %.0fkb/s %.0fkb/s %.0fkb/s %.0fkb/s %.0fkb/s %.0fkb/s",
				(transportTrafficLog[0].Throughput)/1024,
				(transportTrafficLog[1].Throughput)/1024,
				(transportTrafficLog[2].Throughput)/1024,
				(transportTrafficLog[3].Throughput)/1024,
				(transportTrafficLog[4].Throughput)/1024,
				(transportTrafficLog[5].Throughput)/1024,
			)
			transportTrafficLog = make([]shared.TransportTraffic, 0)
		}
		if transportTraffic.Throughput > 1024 {
			transportTrafficLog = append(transportTrafficLog, form)
		}
	}
	response := true
	w.WriteJson(response)
}
func testConn(event *ConnectionEvent) error {
	defaultTransportM.RLock()
	defer defaultTransportM.RUnlock()
	if defaultTransport == nil {
		transportOkC <- false
		event.newState(TestFailed)
		event.newState(NotConfigured)
		event.newState(Ended)
		return errors.New("No active transport")
	}
	err := testSocks5Internet(defaultTransport.Service.Response["bindaddr"])
	if err != nil {
		transportOkC <- false
		event.newState(TestFailed)
		event.newState(Failed)
		event.newState(Ended)
		return errors.New("Http get test failed")
	}
	if event.State != Up && lg.V(4) {
		lg.Infof("event: tested %s -> %s (%s)", event.State, Up, event.ServiceID)
	}
	transportOkC <- true
	if event.State != Up {
		event.newState(Up)
	}
	return nil
}
func (b *BuildQuery) GetVersions() (Artifacts, error) {
	lg.Infof("Getting versions of %s", b.ArtifactDisplayName())
	c, err := getNexusClient()
	if err != nil {
		return nil, err
	}
	artifacts, err := c.Artifacts(
		search.InRepository{
			RepositoryID: repoID,
			Criteria: search.ByCoordinates{
				GroupID:    "com.alkasir",
				ArtifactID: b.Cmd,
				Classifier: b.Classifier(),
			},
		},
	)
	if err != nil {
		return nil, err
	}
	var result Artifacts
	for _, na := range artifacts {
		a, err := newArtifact(na)
		if err != nil {
			return nil, err
		}
		result = append(result, a)
	}
	sort.Sort(result)
	return result, nil
}
// Download downloads the artifact to a local directory.
func (a *Artifact) Download() error {
	shortURL := a.Info().URL
	shortURL = shortURL[strings.LastIndex(shortURL, "/")+1:]
	lg.Infof("Downloading %s", shortURL)
	err := os.MkdirAll(a.Dir("dl"), 0775)
	if err != nil {
		return err
	}
	err = download(a.Path(), a.Info().URL)
	if err != nil {
		lg.Warningf("Error downloading %s", shortURL)
	} else {
		lg.Infof("Downloaded %s", shortURL)
	}
	return err
}
func (d *DebugResponse) WriteToDisk() error {
	dir := d.filename()
	if err := os.MkdirAll(dir, 0775); err != nil {
		panic(err)
	}
	writeTextFile := func(data []string, basename string) error {
		filename := filepath.Join(dir, basename+".txt")
		f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY, 0665)
		if err != nil {
			return err
		}
		defer f.Close()
		for _, v := range data {
			_, err := f.WriteString(v + "\n")
			if err != nil {
				return err
			}
		}
		return nil
	}
	writeJsonFile := func(data interface{}, basename string) error {
		bytes, err := json.MarshalIndent(&data, "", " ")
		if err != nil {
			panic(err)
		}
		filename := filepath.Join(dir, basename+".json")
		err = ioutil.WriteFile(filename, bytes, 0665)
		if err != nil {
			return err
		}
		return nil
	}
	failed := false
	for _, v := range []error{
		writeJsonFile(d.Header, "header"),
		writeTextFile(d.Log, "log"),
		writeTextFile(d.Heap, "heap"),
		writeTextFile(d.GoRoutines, "goroutines"),
		writeTextFile(d.Block, "block"),
		writeTextFile(d.ThreadCreate, "threadcreate"),
		writeJsonFile(d.Config, "config"),
	} {
		if v != nil {
			failed = true
			lg.Error(v)
		}
	}
	if failed {
		return fmt.Errorf("errors writing out report %s", dir)
	}
	lg.Infof("wrote report for %s", dir)
	return nil
}
func main() {
	var bindaddr = flag.String("bindaddr", "0.0.0.0:7245", "bind address")
	rand.Seed(time.Now().UnixNano())
	flag.Parse()
	flagenv.Prefix = "ALKASIR_WANIP_SERVER_"
	flagenv.Parse()
	lg.CopyStandardLogTo("INFO")
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		ipAddress, _, _ := net.SplitHostPort(r.RemoteAddr)
		fmt.Fprintf(w, "%s", ipAddress)
		if lg.V(5) {
			lg.Infof("returning %s", ipAddress)
		}
	})
	lg.Infof("Listening to http://%s", *bindaddr)
	err := http.ListenAndServe(*bindaddr, nil)
	if err != nil {
		lg.Fatal(err)
	}
}
func testPatch(pr CreatePatchResult, publicKey string) error {
	lg.Infof("verifying %s %s>%s", pr.Artifact, pr.OldVersion, pr.NewVersion)
	tmpfile := fmt.Sprintf("/tmp/%s-%s-o", pr.Artifact, pr.OldVersion)
	err := cp(tmpfile, pr.job.OldBinary)
	if err != nil {
		lg.Fatal(err)
	}
	defer func() {
		err = os.Remove(tmpfile)
		if err != nil {
			lg.Errorln(err)
		}
	}()
	sum, err := base64.RawURLEncoding.DecodeString(pr.SHA256Sum)
	if err != nil {
		return err
	}
	sig, err := upgradebin.DecodeSignature(pr.ED25519Signature)
	if err != nil {
		return err
	}
	pub, err := upgradebin.DecodePublicKey([]byte(publicKey))
	if err != nil {
		return err
	}
	opts := update.Options{
		Patcher:    update.NewBSDiffPatcher(),
		Verifier:   upgradebin.NewED25519Verifier(),
		Hash:       crypto.SHA256,
		Checksum:   sum,
		Signature:  sig[:],
		PublicKey:  pub,
		TargetPath: tmpfile,
	}
	diffFile, err := os.Open(pr.DiffFile)
	if err != nil {
		return err
	}
	defer diffFile.Close()
	err = update.Apply(diffFile, opts)
	if err != nil {
		return err
	}
	return nil
}
func readSettings(c *Config) error {
	lg.V(5).Info("Reading settings file")
	isRead := false
	_, err := mkConfigDir()
	if err != nil {
		return err
	}
	data, err := ioutil.ReadFile(ConfigPath("settings.json"))
	if err != nil {
		lg.Infof("Error loading settings.json %s", err)
	} else {
		settings, err := parseConfig(data)
		if err != nil {
			lg.Warningf("Config file error, deleting and resetting")
			err := os.Remove(ConfigPath("settings.json"))
			if err != nil {
				lg.Warningf("Could not delete old settings file (should probably panic here)")
			}
		} else {
			currentConfig.Settings = *settings
			isRead = true
		}
	}
	if !isRead {
		settings, err := parseConfig([]byte(settingsTemplate))
		if err != nil {
			panic("invalid default settings")
		}
		currentConfig.Settings = *settings
	}
	transports := make(map[string]shared.Transport, 0)
	if currentConfig.Settings.Transports != nil {
		for _, v := range currentConfig.Settings.Transports {
			transports[v.Name] = v
		}
	}
	for _, v := range []shared.Transport{
		{Name: "obfs3", Bundled: true, TorPT: true},
		{Name: "obfs4", Bundled: true, TorPT: true},
		{Name: "shadowsocks-client", Bundled: true},
	} {
		transports[v.Name] = v
	}
	currentConfig.Settings.Transports = transports
	return nil
}
// HtmlHandler is a HandlerFunc that serves all pages in the internal browser
// using a single html template.
func HtmlHandler(w http.ResponseWriter, r *http.Request) {
	templates := loadTemplates()
	err := templates.ExecuteTemplate(w, "page.html",
		struct {
			Title    string
			PageData interface{}
		}{
			Title: "Alkasir",
		},
	)
	if err != nil {
		lg.Infof("err: %+v", err)
	}
}
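// A minimal usage sketch (not from the original source; the route and bind
// address are assumptions): HtmlHandler has the http.HandlerFunc signature,
// so it can be registered directly on a ServeMux.
func exampleServeInternalBrowser() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", HtmlHandler)
	if err := http.ListenAndServe("127.0.0.1:8899", mux); err != nil {
		lg.Fatal(err)
	}
}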
// parseConfig reads configuration from a json byte stream.
func parseConfig(config []byte) (*Settings, error) {
	s := &Settings{}
	err := json.Unmarshal(config, &s)
	if err != nil {
		return nil, err
	}
	for i, c := range s.Connections {
		err := c.EnsureID()
		if err != nil {
			lg.Fatal(err)
		}
		lg.V(15).Infof("connection id: %s", c.ID)
		if lg.V(50) {
			v, _ := c.Encode()
			lg.Infof("connection encoded: %s", v)
			lg.Infof("connection full: %+v", c)
		}
		s.Connections[i] = c
	}
	return s, nil
}
// NewUpdateChecker creates and returns an UpdateChecker instance.
// The caller should then listen on the RequestC channel for UpdateRequests.
func NewUpdateChecker(name string) (*UpdateChecker, error) {
	c := &UpdateChecker{
		Interval: time.Duration(1*time.Hour + (time.Minute * time.Duration(rand.Intn(120)))),
	}
	c.response = make(chan UpdateResult)
	c.RequestC = make(chan UpdateRequest)
	c.forceRequestC = make(chan bool)
	lg.Infof("Setting up update timer for %s every %f minute(s)", name, c.Interval.Minutes())
	ticker := time.NewTicker(c.Interval)
	go func() {
		for {
			select {
			case <-c.forceRequestC:
				if !c.active {
					continue
				}
				c.RequestC <- UpdateRequest{
					ResponseC: c.response,
				}
			case <-ticker.C:
				if !c.active {
					continue
				}
				c.RequestC <- UpdateRequest{
					ResponseC: c.response,
				}
			case response := <-c.response:
				c.LastCheck = time.Now()
				switch response {
				case UpdateSuccess:
					lg.V(5).Infoln("UpdateSuccess")
					c.LastUpdate = c.LastCheck
					c.LastFailedCheck = time.Time{}
				case UpdateError:
					lg.Warningln("update check failed")
					c.LastFailedCheck = c.LastCheck
					<-time.After(3*time.Second + time.Duration(rand.Intn(5)))
					go func() {
						c.forceRequestC <- true
					}()
				}
			}
		}
	}()
	return c, nil
}
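// A minimal usage sketch of the pattern described in the doc comment above:
// the caller receives UpdateRequests on RequestC and reports the outcome back
// on the request's ResponseC channel. The performCheck helper is illustrative
// only and not part of the original source; it also assumes the checker is
// activated elsewhere.
func exampleUpdateCheckerLoop() {
	checker, err := NewUpdateChecker("alkasir-gui")
	if err != nil {
		lg.Fatal(err)
	}
	go func() {
		for req := range checker.RequestC {
			// run the actual update check and signal the result back
			if err := performCheck(); err != nil {
				req.ResponseC <- UpdateError
				continue
			}
			req.ResponseC <- UpdateSuccess
		}
	}()
}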
func debugPprof(args []string) error {
	var dir string
	if len(args) == 0 {
		dir = debugLatestImport()
	} else {
		dir = args[0]
	}
	const profile = "heap"
	var header debugexport.DebugHeader
	data, err := ioutil.ReadFile(filepath.Join(dir, "header.json"))
	if err != nil {
		lg.Fatal(err)
	}
	err = json.Unmarshal(data, &header)
	if err != nil {
		lg.Fatal(err)
	}
	lg.Infof("%+v", header)
	q := nexus.BuildQuery{
		OS:      header.OS,
		Arch:    header.Arch,
		Version: header.Version,
		Cmd:     "alkasir-gui",
	}
	cmdlocation, err := q.GetMatchingBuildBinary()
	if err != nil {
		lg.Fatal(err)
	}
	var cmdargs []string
	cmdargs = append(cmdargs, "tool", "pprof", cmdlocation, filepath.Join(dir, profile+".txt"))
	cmd := exec.Command("go", cmdargs...)
	cmd.Stdout = os.Stdout
	cmd.Stdin = os.Stdin
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
func (s *sessionTokenStore) expireSessions() {
	var expired []shared.SuggestionToken
	start := time.Now()
	th := start.Add(-tokenSessionTimeout)
	s.RLock()
	for _, v := range s.sessions {
		if v.CreatedAt.Before(th) {
			expired = append(expired, v.ID)
		}
	}
	s.RUnlock()
	if len(expired) > 0 {
		s.Lock()
		for _, v := range expired {
			delete(s.sessions, v)
		}
		tokenSessionsActive.Set(float64(len(s.sessions)))
		tokenSessionsTotal.Set(float64(len(s.sessions)))
		s.Unlock()
		if lg.V(3) {
			lg.Infof("expired %d sessions in %s", len(expired), time.Since(start).String())
		}
	}
}
// SuggestionToken JSON API method.
func SuggestionToken(dbclients db.Clients) func(w rest.ResponseWriter, r *rest.Request) {
	return func(w rest.ResponseWriter, r *rest.Request) {
		// HANDLE USERIP BEGIN
		req := shared.SuggestionTokenRequest{}
		err := r.DecodeJsonPayload(&req)
		if err != nil {
			apiError(w, shared.SafeClean(err.Error()), http.StatusInternalServerError)
			return
		}

		// validate country code.
		if !validCountryCode(req.CountryCode) {
			apiError(w, fmt.Sprintf("invalid country code: %s", req.CountryCode), http.StatusBadRequest)
			return
		}

		// parse/validate client ip address.
		IP := req.ClientAddr
		if IP == nil {
			apiError(w, "bad ClientAddr", http.StatusBadRequest)
			return
		}

		// parse and validate url.
		URL := strings.TrimSpace(req.URL)
		if URL == "" {
			apiError(w, "no or empty URL", http.StatusBadRequest)
			return
		}
		u, err := url.Parse(URL)
		if err != nil {
			apiError(w, fmt.Sprintf("%s is not a valid URL", URL), http.StatusBadRequest)
			return
		}
		if !shared.AcceptedURL(u) {
			apiError(w, fmt.Sprintf("%s is not a valid URL", URL), http.StatusBadRequest)
			return
		}

		// resolve ip to asn.
		var ASN int
		ASNres, err := dbclients.Internet.IP2ASN(IP)
		if err != nil {
			lg.Errorln(shared.SafeClean(err.Error()))
			apiError(w, shared.SafeClean(err.Error()), http.StatusInternalServerError)
			return
		}
		if ASNres != nil {
			ASN = ASNres.ASN
		} else {
			lg.Warningf("no ASN lookup result for IP: %s", shared.SafeClean(IP.String()))
		}

		// resolve ip to country code.
		countryCode := dbclients.Maxmind.IP2CountryCode(IP)

		// resolve ip to city geonameid.
		geoCityID := dbclients.Maxmind.IP2CityGeoNameID(IP)

		req.ClientAddr = net.IPv4zero
		IP = net.IPv4zero
		// HANDLE USERIP END

		{
			supported, err := dbclients.DB.IsURLAllowed(u, countryCode)
			if err != nil {
				// TODO: standardize http status codes
				apiError(w, err.Error(), http.StatusForbidden)
				return
			}
			if !supported {
				lg.Infof("got request for unsupported URL %s", URL)
				w.WriteJson(shared.SuggestionTokenResponse{
					Ok:  false,
					URL: req.URL,
				})
				return
			}
		}

		// start new submission token session
		token := db.SessionTokens.New(URL)

		// create newclienttoken sample data
		sample := shared.NewClientTokenSample{
			URL:         URL,
			CountryCode: req.CountryCode,
		}
		sampleData, err := json.Marshal(sample)
		if err != nil {
			lg.Errorln(err)
			apiError(w, "error #20150424-002542-CEST", http.StatusInternalServerError)
			return
		}

		// create extraData
		extra := shared.IPExtraData{
			CityGeoNameID: geoCityID,
		}
		extraData, err := json.Marshal(extra)
		if err != nil {
			lg.Errorln(err)
			apiError(w, "error #20150427-211052-CEST", http.StatusInternalServerError)
			return
		}

		// insert into db
		{
			err := dbclients.DB.InsertSample(db.Sample{
				Host:        u.Host,
				CountryCode: countryCode,
				ASN:         ASN,
				Type:        "NewClientToken",
				Origin:      "Central",
				Token:       token,
				Data:        sampleData,
				ExtraData:   extraData,
			})
			if err != nil {
				apiError(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}

		// queue central measurements
		measurements, err := measure.DefaultMeasurements(req.URL)
		if err != nil {
			lg.Warningf("could not create standard measurements: %s", err.Error())
		} else {
			queueMeasurements(token, measurements...)
		}

		// write json response
		{
			err := w.WriteJson(shared.SuggestionTokenResponse{
				Ok:    true,
				URL:   URL,
				Token: token,
			})
			if err != nil {
				lg.Errorln(err.Error())
				return
			}
		}
	}
}
func createJobs(q nexus.BuildQuery, jobC chan CreatePatchJob, privateKey string, publicKey string) error {
	versions, err := q.GetVersions()
	if err != nil {
		lg.Fatal(err)
	}
	sort.Sort(versions)
	sort.Sort(sort.Reverse(versions))
	if len(versions) < 2 {
		return errors.New("too few versions")
	}
	latestVersion := versions[0]
	lg.V(20).Infoln("latest version", latestVersion)
	{
		if len(versions) > PatchHistoryAmountMax+1 {
			versions = versions[1 : PatchHistoryAmountMax+1]
		} else {
			versions = versions[1:]
		}
	}
	lg.V(20).Infoln("old versions", versions)

	// TODO: reimplement this check so that upgrade processing can be resumed.
	// if _, err := os.Stat(jsonname); err == nil {
	// 	lg.Infof("%s exists, skipping processing", jsonname)
	// 	return nil
	// }

	latestBinPath, err := q.GetBinary(latestVersion)
	if err != nil {
		return err
	}
	lg.Infof("creating patchJobs for %s %s %s",
		latestVersion.ArtifactID,
		latestVersion.Classifier,
		latestVersion.Version,
	)
	for _, v := range versions {
		bp, err := q.GetBinary(v)
		if err != nil {
			return err
		}
		j := CreatePatchJob{
			Artifact:   fmt.Sprintf("%s-%s", latestVersion.ArtifactID, latestVersion.Classifier),
			OldBinary:  bp,
			NewBinary:  latestBinPath,
			NewVersion: latestVersion.Version,
			OldVersion: v.Version,
			PrivateKey: privateKey,
			PublicKey:  publicKey,
		}
		lg.V(10).Infof("sending created job %s", j.Artifact)
		jobC <- j
		lg.V(10).Infof("sent job %s", j.Artifact)
	}
	lg.Infof("all jobs created for %s", q.ArtifactDisplayName())
	return nil
}
func insertUpgrades([]string) error {
	if err := OpenDB(); err != nil {
		return err
	}
	files, err := findJSONFiles("diffs/")
	if err != nil {
		return err
	}
	var upgrades []db.UpgradeMeta
	for _, v := range files {
		lg.V(5).Infoln("reading", v)
		data, err := ioutil.ReadFile(v)
		if err != nil {
			return err
		}
		var cpr makepatch.CreatePatchResult
		err = json.Unmarshal(data, &cpr)
		if err != nil {
			return err
		}
		um, ok, err := sqlDB.GetUpgrade(db.GetUpgradeQuery{
			Artifact:        cpr.Artifact,
			Version:         cpr.NewVersion,
			AlsoUnpublished: true,
		})
		if err != nil {
			return err
		}
		if ok && um.Artifact == cpr.Artifact && um.Version == cpr.NewVersion {
			lgheader := cpr.Artifact + " " + cpr.NewVersion
			if um.ED25519Signature != cpr.ED25519Signature {
				lg.Warningf("%s signatures do not match!", lgheader)
			}
			if um.SHA256Sum != cpr.SHA256Sum {
				lg.Warningf("%s shasum does not match!", lgheader)
			}
			lg.Infof("%s is already imported, skipping", lgheader)
			continue
		}
		upgrades = append(upgrades, db.UpgradeMeta{
			Artifact:         cpr.Artifact,
			Version:          cpr.NewVersion,
			SHA256Sum:        cpr.SHA256Sum,
			ED25519Signature: cpr.ED25519Signature,
		})
	}
	{
		// NOTE: this will be removed later, a quick hack before other upgrades refactoring takes place
		uniqueUpgrades := make(map[string]db.UpgradeMeta, 0)
		for _, v := range upgrades {
			uniqueUpgrades[fmt.Sprintf("%s---%s", v.Artifact, v.Version)] = v
		}
		upgrades = upgrades[:0]
		for _, v := range uniqueUpgrades {
			upgrades = append(upgrades, v)
		}
	}
	fmt.Println(upgrades)
	err = sqlDB.InsertUpgrades(upgrades)
	if err != nil {
		lg.Errorln(err)
		return err
	}
	return nil
}
func (m *modifyConnections) Update() []shared.Connection {
	lg.Infoln("updating connections..")
	if lg.V(19) {
		lg.Infof("pre upgrade state:")
		for _, v := range m.Connections {
			lg.Infoln(v)
		}
	}

	// create map for id lookups
	conns := make(map[string]shared.Connection, 0)
	for _, connection := range m.Connections {
		conns[connection.ID] = connection
	}

	// remove old connections
	for _, ID := range m.Remove {
		if _, ok := conns[ID]; ok {
			lg.V(19).Infof("remove connection: %s", ID)
			delete(conns, ID)
		}
	}

	// add new connections
	for _, v := range m.Add {
		conn, err := shared.DecodeConnection(v)
		if err != nil {
			lg.Fatal(err)
		}
		ID := conn.ID
		if _, ok := conns[ID]; !ok {
			lg.V(19).Infof("add connection: %s", ID)
			conns[ID] = conn
		}
	}

	// protect connections
	for _, ID := range m.Protect {
		if _, ok := conns[ID]; ok {
			c := conns[ID]
			c.Protected = true
			conns[ID] = c
			lg.V(19).Infof("protected connection: %s", ID)
		}
	}

	var result []shared.Connection
	for _, v := range conns {
		result = append(result, v)
	}
	if lg.V(19) {
		lg.Infof("upgraded connections result:")
		for _, v := range result {
			lg.Infoln(v)
		}
	}
	return result
}
// Initialize service
func (s *Service) initService() error {
	cmd := s.cmd
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	s.stdout = stdout
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return err
	}
	s.stderr = stderr
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return err
	}
	s.stdin = stdin
	if lg.V(5) {
		alkasirEnv := ""
		for _, v := range s.cmd.Env {
			if strings.HasPrefix(v, "ALKASIR_") {
				alkasirEnv += v + " "
			}
		}
		lg.Infof("Starting service: %s %s", alkasirEnv, cmd.Path)
	}
	err = cmd.Start()
	if err != nil {
		return err
	}
	scanner := bufio.NewScanner(stdout)
	var transportErrMsg string
	transportErr := false
	var line string
	for scanner.Scan() {
		line = scanner.Text()
		lg.V(5).Infoln("DBG: ", line)
		if errorM.MatchString(line) {
			transportErr = true
			transportErrMsg = line
			return errors.New("error: " + transportErrMsg)
		} else if doneM.MatchString(line) {
			break
		} else if exposeM.MatchString(line) {
			match := exposeM.FindStringSubmatch(line)
			s.Response["bindaddr"] = match[3]
			s.Response["protocol"] = match[2]
			s.registerMethod(match[1], match[2], match[3])
		} else if versionM.MatchString(line) {
		} else if parentM.MatchString(line) {
			match := parentM.FindStringSubmatch(line)
			s.Response["parentaddr"] = match[3]
		} else {
			lg.Infoln("not handled line:", line)
			return errors.New("unhandled line")
		}
	}
	if transportErr {
		err := cmd.Wait()
		if err != nil {
			lg.Warningln(err)
		}
		lg.Fatal(transportErrMsg)
		return errors.New("transport err")
	}
	return err
}