// httpWorker fetches /xmldata for each queued host and unmarshals the
// response into the host entry. It assumes the caller has called wg.Add
// once per job and that wg, scheme, port and httpClientTimeout are
// package-level.
func httpWorker(jobs <-chan int, hosts []host, bar *pb.ProgressBar) {
	timeout, err := time.ParseDuration(*httpClientTimeout)
	if err != nil {
		log.Fatal(err)
	}
	// One client is enough; it is safe to reuse across requests.
	client := http.Client{Timeout: timeout}
	for job := range jobs {
		h := &hosts[job]
		url := fmt.Sprintf("%s://%s:%d/xmldata?item=All", *scheme, h.ip.String(), *port)
		// Anonymous function so the deferred wg.Done and bar.Increment
		// run once per job.
		func() {
			defer bar.Increment()
			defer wg.Done()
			res, err := client.Get(url)
			if err != nil {
				h.Err += err.Error()
				return
			}
			defer res.Body.Close()
			xmldata, err := ioutil.ReadAll(res.Body)
			if err != nil {
				h.Err += err.Error()
				return
			}
			h.XMLErr = xml.Unmarshal(xmldata, h)
		}()
	}
}
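// A minimal driver sketch, not part of the original code: it assumes the
// same package-level wg that httpWorker calls Done on, and an arbitrary
// pool size of 8 workers.
func scanHosts(hosts []host, bar *pb.ProgressBar) {
	jobs := make(chan int)
	for i := 0; i < 8; i++ {
		go httpWorker(jobs, hosts, bar)
	}
	for i := range hosts {
		wg.Add(1) // httpWorker defers wg.Done for every job it takes
		jobs <- i
	}
	close(jobs)
	wg.Wait()
}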
// StartClient copies each file name received on c from readUrl to
// writeUrl and verifies the transfer with the X-Ae-Md5 header.
func StartClient(c chan string, bar *pb.ProgressBar) {
	readClient, writeClient := http.Client{}, http.Client{}
	for file := range c {
		file = escapeFile(file)
		read := readUrl + file
		write := writeUrl + file

		// GET the source object.
		resp, err := readClient.Get(read)
		if err != nil {
			panic(err)
		}
		getMd5 := resp.Header.Get("X-Ae-Md5")
		length := resp.ContentLength

		// PUT it to the destination, streaming the GET body through.
		req, err := http.NewRequest(*method, write, resp.Body)
		if err != nil {
			panic(err)
		}
		req.ContentLength = length
		wres, err := writeClient.Do(req)
		if err != nil {
			panic(err)
		}
		if wres.StatusCode != http.StatusConflict {
			putMd5 := wres.Header.Get("X-Ae-Md5")
			if putMd5 != getMd5 {
				fmt.Printf("ERROR! MD5 mismatch: %s vs %s (%s)\n", getMd5, putMd5, file)
			}
		}
		wres.Body.Close()
		resp.Body.Close()
		bar.Increment()
	}
}
// HostMerge packs hostnames into pipe-separated expressions no longer
// than 255 characters each, e.g. "a.com|b.com|c.com", starting a new
// entry whenever the next hostname would overflow the limit.
func HostMerge(List []Host, ShowBar bool) []string {
	count := 0
	filterList := []string{""}
	var bar *pb.ProgressBar
	if ShowBar {
		bar = pb.StartNew(len(List))
		bar.SetMaxWidth(80)
	}
	for _, Host := range List {
		length := len(filterList[count])
		switch {
		case length == 0:
			filterList[count] = Host.Hostname
		case length+Host.length() <= 255:
			filterList[count] += "|" + Host.Hostname
		default:
			count++
			filterList = append(filterList, Host.Hostname)
		}
		if ShowBar {
			bar.Increment()
			time.Sleep(time.Millisecond * 50)
		}
	}
	if ShowBar {
		bar.Finish()
	}
	return filterList
}
func (g *Game) getInitTweets(aliveNum int) {
	var progress *pb.ProgressBar
	if !g.debug {
		progress = pb.StartNew(aliveNum)
	}
	for y := 0; y < g.field.SizeY; y++ {
		for x := 0; x < g.field.SizeX; x++ {
			if g.field.Points[y][x].IsAlive {
				tweet := <-g.tweetCh
				if g.debug {
					emoji.Printf(":bird:Points[%v][%v]: %v\n", x, y, tweet)
				}
				g.field.Points[y][x].Str = tweet
				if !g.debug {
					progress.Increment()
				}
			}
		}
	}
	if g.debug {
		emoji.Println(":smile::smile::smile:Collected initial tweets:smile::smile::smile:")
	} else {
		e := emoji.Sprint(":smile::smile::smile:")
		progress.FinishPrint(e + "Collected initial tweets" + e)
	}
}
// work drains the queue, downloading each track and ticking the bar.
func work(wg *sync.WaitGroup, bar *pb.ProgressBar, queue chan *Track) {
	defer wg.Done()
	for track := range queue {
		track.Download()
		bar.Increment()
	}
}
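// A minimal usage sketch for work, under the assumption that Track.Download
// exists as used above; the pool size of 4 is an illustrative choice.
func downloadAll(tracks []*Track) {
	var wg sync.WaitGroup
	bar := pb.StartNew(len(tracks))
	queue := make(chan *Track)
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go work(&wg, bar, queue)
	}
	for _, t := range tracks {
		queue <- t
	}
	close(queue)
	wg.Wait()
	bar.Finish()
}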
func main() {
	opts, args := parseFlags()
	conv := cbconvert.NewConvertor(opts)
	var bar *pb.ProgressBar

	c := make(chan os.Signal, 3)
	signal.Notify(c, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM)
	go func() {
		for range c {
			fmt.Fprintf(os.Stderr, "Aborting\n")
			os.RemoveAll(conv.Workdir)
			os.Exit(1)
		}
	}()

	if _, err := os.Stat(opts.Outdir); err != nil {
		os.MkdirAll(opts.Outdir, 0777)
	}

	files := conv.GetFiles(args)

	if (opts.Cover || opts.Thumbnail) && !opts.Quiet {
		bar = pb.New(conv.Nfiles)
		bar.ShowTimeLeft = false
		bar.Start()
	}

	for _, file := range files {
		stat, err := os.Stat(file)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error Stat: %v\n", err.Error())
			continue
		}
		if opts.Cover {
			conv.ExtractCover(file, stat)
			if !opts.Quiet {
				bar.Increment()
			}
			continue
		}
		if opts.Thumbnail {
			conv.ExtractThumbnail(file, stat)
			if !opts.Quiet {
				bar.Increment()
			}
			continue
		}
		conv.ConvertComic(file, stat)
	}
}
func indexAndSaveHits(ts *index.TokenSetSearcher, hits []HitInfo, idxs []int, saveFullHit func(*HitInfo) error) error {
	rank := 0
	var bar *pb.ProgressBar
	if terminal.IsTerminal(int(os.Stdout.Fd())) {
		bar = pb.New(len(idxs))
		bar.Start()
	}
	for i := range idxs {
		hit := &hits[idxs[i]]
		// Hits sharing a static score share a rank.
		if i > 0 && hit.StaticScore < hits[idxs[i-1]].StaticScore {
			rank = i
		}
		hit.StaticRank = rank

		if err := saveFullHit(hit); err != nil {
			return err
		}

		// Strip the bulky fields before indexing, keeping the text for tokenizing.
		var desc, readme string
		desc, hit.Description = hit.Description, ""
		readme, hit.ReadmeData = hit.ReadmeData, ""
		hit.Imported = nil
		hit.TestImported = nil

		var nameTokens stringsp.Set
		nameTokens = AppendTokens(nameTokens, []byte(hit.Name))

		var tokens stringsp.Set
		tokens.Add(nameTokens.Elements()...)
		tokens = AppendTokens(tokens, []byte(hit.Package))
		tokens = AppendTokens(tokens, []byte(desc))
		tokens = AppendTokens(tokens, []byte(readme))
		tokens = AppendTokens(tokens, []byte(hit.Author))
		for _, word := range hit.Exported {
			// Assign the result, as with the other AppendTokens calls above,
			// so the exported-name tokens are kept.
			tokens = AppendTokens(tokens, []byte(word))
		}
		ts.AddDoc(map[string]stringsp.Set{
			IndexTextField: tokens,
			IndexNameField: nameTokens,
			IndexPkgField:  stringsp.NewSet(hit.Package),
		}, *hit)

		if bar != nil {
			bar.Increment()
		}
	}
	if bar != nil {
		bar.FinishPrint("Indexing finished!")
	}
	DumpMemStats()
	return nil
}
// CheckMetadata downloads the metadata about all of the files currently
// stored on Drive and compares it with the local cache.
func (gd *GDrive) CheckMetadata(filename string, report func(string)) error {
	idToFile, err := gd.getIdToFile(filename)
	if err != nil {
		return err
	}

	// This will almost certainly take a while, so put up a progress bar.
	var bar *pb.ProgressBar
	if !gd.quiet {
		bar = pb.New(len(idToFile))
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Checking metadata cache: ")
		bar.Start()
	}

	err = gd.runQuery("trashed=false", func(f *drive.File) {
		if file, ok := idToFile[f.Id]; ok {
			df := newFile(f.Title, f)
			if !filesEqual(df, file) {
				report(fmt.Sprintf("%s: metadata mismatch.\nLocal: %+v\nDrive: %+v",
					file.Path, file, df))
			}
			if bar != nil {
				bar.Increment()
			}
			delete(idToFile, f.Id)
		} else {
			// It'd be preferable to have "sharedWithMe=false" included in
			// the query string above, but the combination of that with
			// "trashed=false" seems to lead to no results being returned.
			if !f.Shared {
				report(fmt.Sprintf("%s: found on Drive, not in local cache [%+v]", f.Title, f))
			}
		}
	})
	if err != nil {
		return err
	}

	for _, f := range idToFile {
		report(fmt.Sprintf("%s: found in local cache, not on Drive [%+v]", f.Path, f))
	}

	if bar != nil {
		bar.Finish()
	}
	return nil
}
func main() {
	var progress *pb.ProgressBar
	page := 0
	category := "bugs"
	for {
		printf("Get page: %d", page)
		tickets, _, err := sfClient.Tracker.Info(category)
		if err != nil {
			log.Fatal(err)
		}
		if ghMilestones == nil {
			createMilestones(tickets)
			getMilestones()
		}
		if progress == nil {
			log.Println("Creating tickets")
			progress = pb.StartNew(tickets.Count)
		}
		if len(tickets.Tickets) == 0 {
			break
		}
		for _, ticket := range tickets.Tickets {
			ticket, _, err := sfClient.Tracker.Get(category, ticket.TicketNum)
			if err != nil {
				log.Fatal(err)
			}
			sfTicketToGhIssue(ticket, category)
			progress.Increment()
			time.Sleep(time.Millisecond * sleepTime)
		}
		page++
	}
	progress.FinishPrint("All tickets imported")
}
// scan probes each IP for an open iLO port, queries it for server and
// iLO names, and sends the result on out. The bar ticks once per host.
func scan(ips []string, out chan ILOInfo, bar *pb.ProgressBar, wg *sync.WaitGroup) {
	defer wg.Done()
	for _, host := range ips {
		if IsOpen(host, iloPort) {
			info, err := requestInfo(host)
			if err != nil {
				// Skip hosts we cannot query; info would be nil below.
				fmt.Println(err)
				bar.Increment()
				continue
			}
			srvName := ""
			iloName := ""
			if match, _ := regexp.MatchString("iLO (3|4|5)", info.HW); match {
				srvName, iloName, _ = requestServerName(host)
			} else {
				srvName, iloName, _ = requestServerNameV2(host)
			}
			info.ServerName = srvName
			info.IloName = iloName
			out <- *info
		}
		bar.Increment()
	}
}
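// An illustrative sketch, not from the original source, of fanning scan out
// over slices of an IP list; the chunk size of 64 is an assumption.
func scanAll(ips []string) []ILOInfo {
	var wg sync.WaitGroup
	bar := pb.StartNew(len(ips))
	out := make(chan ILOInfo)
	const chunk = 64
	for i := 0; i < len(ips); i += chunk {
		end := i + chunk
		if end > len(ips) {
			end = len(ips)
		}
		wg.Add(1) // scan calls wg.Done when its slice is drained
		go scan(ips[i:end], out, bar, &wg)
	}
	go func() {
		wg.Wait()
		close(out) // lets the collecting loop below terminate
	}()
	var infos []ILOInfo
	for info := range out {
		infos = append(infos, info)
	}
	bar.Finish()
	return infos
}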
// processSystemArchives processes archives for the given system.
func (h *Harvester) processSystemArchives(s *system.System, archives []string) error {
	var bar *pb.ProgressBar
	nb := len(archives)

	// Extract archives.
	if !s.Options.Quiet {
		fmt.Printf("[%s] Extracting %v archive(s)\n", s.Infos.Name, nb)
		if !s.Options.Debug {
			bar = pb.StartNew(nb)
			bar.ShowCounters = true
			bar.ShowPercent = false
			bar.ShowTimeLeft = true
			bar.SetMaxWidth(80)
		}
	}
	for _, archive := range archives {
		if !s.Options.Quiet && !s.Options.Debug {
			bar.Increment()
		}
		if err := s.ProcessArchive(archive, h.Options.Output); err != nil {
			return err
		}
	}
	if !s.Options.Quiet && !s.Options.Debug {
		bar.Finish()
		fmt.Printf("[%s] Processed %v files (skipped: %v)\n", s.Infos.Name, s.Processed, s.Skipped)
	}
	fmt.Printf("[%s] Selected %v games\n", s.Infos.Name, len(s.Games))
	return nil
}
// dumpToBoltDB writes the word and tuple maps into a fresh BoltDB file,
// sharding words over numbered buckets and tuples over per-letter-pair
// buckets.
func dumpToBoltDB(path string, words map[string]int, tuples map[string]string, tupleLength int) {
	var bar *pb.ProgressBar
	var start time.Time

	wordBuckets := len(words) / 600
	if wordBuckets < 10 {
		wordBuckets = 10
	}
	if VERBOSE {
		fmt.Printf("Creating %v word buckets\n", wordBuckets)
	}

	if _, err := os.Stat(path); err == nil {
		os.Remove(path)
		if VERBOSE {
			fmt.Println("Removed old " + path)
		}
	}

	// Open a new bolt database.
	db, err := bolt.Open(path, 0600, &bolt.Options{NoGrowSync: false})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if VERBOSE {
		fmt.Println("Creating subset buckets...")
		bar = pb.StartNew(len(tuples))
		start = time.Now()
	}
	err = db.Batch(func(tx *bolt.Tx) error {
		for k := range tuples {
			if VERBOSE {
				bar.Increment()
			}
			firstLetter := string(k[0])
			secondLetter := string(k[1])
			if strings.Contains(alphabet, firstLetter) && strings.Contains(alphabet, secondLetter) {
				_, err := tx.CreateBucketIfNotExists([]byte("tuples-" + firstLetter + secondLetter))
				if err != nil {
					return fmt.Errorf("create bucket: %s", err)
				}
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	if VERBOSE {
		elapsed := time.Since(start)
		bar.FinishPrint("Creating subset buckets took " + elapsed.String())
	}

	// Catch-all bucket for tuples containing characters outside the alphabet.
	db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket([]byte("tuples"))
		if err != nil {
			return fmt.Errorf("create bucket: %s", err)
		}
		return nil
	})

	if VERBOSE {
		fmt.Println("Creating words buckets...")
	}
	db.Batch(func(tx *bolt.Tx) error {
		for i := 0; i < wordBuckets; i++ {
			_, err := tx.CreateBucket([]byte("words-" + strconv.Itoa(i)))
			if err != nil {
				return fmt.Errorf("create bucket: %s", err)
			}
		}
		return nil
	})

	db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket([]byte("vars"))
		if err != nil {
			return fmt.Errorf("create bucket: %s", err)
		}
		return nil
	})

	if VERBOSE {
		fmt.Println("Loading words into db...")
		start = time.Now()
		bar = pb.StartNew(len(words))
	}
	err = db.Batch(func(tx *bolt.Tx) error {
		for k, v := range words {
			if VERBOSE {
				bar.Increment()
			}
			if len(k) > 0 {
				// Shard words over the numbered buckets by id modulo bucket count.
				b := tx.Bucket([]byte("words-" + strconv.Itoa(v%wordBuckets)))
				b.Put([]byte(strconv.Itoa(v)), []byte(k))
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	if VERBOSE {
		elapsed := time.Since(start)
		bar.FinishPrint("Words took " + elapsed.String())
	}

	if VERBOSE {
		fmt.Println("Loading subsets into db...")
		start = time.Now()
		bar = pb.StartNew(len(tuples))
	}
	err = db.Update(func(tx *bolt.Tx) error {
		for k, v := range tuples {
			if VERBOSE {
				bar.Increment()
			}
			firstLetter := string(k[0])
			secondLetter := string(k[1])
			if strings.Contains(alphabet, firstLetter) && strings.Contains(alphabet, secondLetter) {
				b := tx.Bucket([]byte("tuples-" + firstLetter + secondLetter))
				b.Put([]byte(k), []byte(v))
			} else {
				b := tx.Bucket([]byte("tuples"))
				b.Put([]byte(k), []byte(v))
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
		// BUG(schollz): Windows file resize error: https://github.com/schollz/goagrep/issues/6
	}
	if VERBOSE {
		elapsed := time.Since(start)
		bar.FinishPrint("Subsets took " + elapsed.String())
	}

	db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("vars"))
		return b.Put([]byte("tupleLength"), []byte(strconv.Itoa(tupleLength)))
	})
	db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("vars"))
		return b.Put([]byte("wordBuckets"), []byte(strconv.Itoa(wordBuckets)))
	})
}
// StoreRedirects folds the accumulated link counts of redirect pages into
// their targets: each redirect's rows are re-inserted under the target
// title and the redirect's own rows are deleted.
func StoreRedirects(db *sql.DB, redirs []wikidump.Redirect, bar *pb.ProgressBar) error {
	counts := make([]linkCount, 0)

	var titleId, old, del, delTitle, insTitle, ins, update *sql.Stmt
	tx, err := db.Begin()
	if err == nil {
		titleId, err = tx.Prepare(`select id from titles where title = ?`)
	}
	if err == nil {
		old, err = tx.Prepare(`select ngramhash, count from linkstats where targetid = ?`)
	}
	if err == nil {
		del, err = tx.Prepare(`delete from linkstats where targetid = ?`)
	}
	if err == nil {
		delTitle, err = tx.Prepare(`delete from titles where id = ?`)
	}
	if err == nil {
		insTitle, err = tx.Prepare(`insert or ignore into titles values (NULL, ?)`)
	}
	if err == nil {
		ins, err = tx.Prepare(`insert or ignore into linkstats values (?, (select id from titles where title = ?), 0)`)
	}
	if err == nil {
		update, err = tx.Prepare(`update linkstats set count = count + ? where targetid = (select id from titles where title = ?) and ngramhash = ?`)
	}
	if err != nil {
		return err
	}

	for _, r := range redirs {
		if bar != nil {
			bar.Increment()
		}

		var fromId int
		err := titleId.QueryRow(r.Title).Scan(&fromId)
		if err == sql.ErrNoRows { // No links to this redirect.
			continue
		} else if err != nil {
			return err
		}

		rows, err := old.Query(fromId)
		if err != nil {
			return err
		}

		// SQLite won't let us INSERT or UPDATE while doing a SELECT,
		// so buffer the counts first.
		for counts = counts[:0]; rows.Next(); {
			var count float64
			var hash int64
			rows.Scan(&hash, &count)
			counts = append(counts, linkCount{hash, count})
		}
		rows.Close()
		err = rows.Err()

		if err == nil {
			_, err = del.Exec(fromId)
		}
		if err == nil {
			_, err = delTitle.Exec(fromId)
		}
		if err != nil {
			return err
		}

		for _, c := range counts {
			if err == nil {
				_, err = insTitle.Exec(r.Target)
			}
			if err == nil {
				_, err = ins.Exec(c.hash, r.Target)
			}
			if err == nil {
				_, err = update.Exec(c.count, r.Target, c.hash)
			}
		}
		if err != nil {
			return err
		}
	}
	return tx.Commit()
}
func (ctx *Context) Load() error {
	var bar *pb.ProgressBar

	if Verbose {
		log.Println("loading database")
	}
	db, err := LoadDB(ctx.DatabaseName, *decompress)
	if os.IsNotExist(err) {
		log.Printf("database not found")
		return nil
	} else if err != nil {
		log.Printf("error loading database: %s", err)
		return err
	}

	if Verbose {
		log.Println("loading teams")
		bar = pb.StartNew(len(db.Teams))
	}
	for _, team := range db.Teams {
		ctx.AddTeam(team)
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
		log.Println("loading match history")
		bar = pb.StartNew(len(db.Outcomes))
	}
	loadedOutcomes := map[string]*Outcome{}
	for _, outcome := range db.Outcomes {
		loadedOutcomes[outcome.ID] = outcome
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
		bar = pb.StartNew(len(db.Matches))
	}
	for _, match := range db.Matches {
		outcome, ok := loadedOutcomes[match.OutcomeID]
		if !ok {
			log.Panicf("corrupted history %q", match.ID)
		}
		ctx.AddMatch(match, outcome)
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
	}
	return nil
}
func main() {
	file := flag.String("file", "", "File containing the list of packages")
	output := flag.String("output", "gddoscore.out", "Output file")
	progress := flag.Bool("progress", false, "Show a progress bar")
	flag.Parse()

	var pkgs map[string]bool
	var err error

	// flag.String never returns nil, so checking the value is enough.
	if *file != "" {
		pkgs, err = readFromFile(*file)
	} else {
		pkgs, err = readFromStdin()
	}
	if err != nil {
		fmt.Println(err)
		return
	}

	db, err := database.New()
	if err != nil {
		fmt.Println("error connecting to database:", err)
		return
	}

	o, err := os.OpenFile(*output, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println("error creating output file:", err)
		return
	}
	defer o.Close()

	log.SetOutput(o)
	log.Println("BEGIN")
	log.Printf("%d packages will be analyzed\n", len(pkgs))

	var progressBar *pb.ProgressBar
	if *progress {
		progressBar = pb.StartNew(len(pkgs))
	}

	db.Do(func(pkg *database.PackageInfo) error {
		if _, ok := pkgs[pkg.PDoc.ImportPath]; !ok {
			// We aren't analyzing this package.
			return nil
		}
		if pkg.Score == 0 {
			log.Printf("package “%s” has no score", pkg.PDoc.ImportPath)
		} else {
			log.Printf("package “%s” has a score", pkg.PDoc.ImportPath)
			if !*progress {
				fmt.Println(pkg.PDoc.ImportPath)
			}
		}
		if *progress {
			progressBar.Increment()
		}
		return nil
	})

	if *progress {
		progressBar.Finish()
	}
	log.Println("END")
}
func main() {
	clientID := flag.String("id", "", "Github client ID")
	clientSecret := flag.String("secret", "", "Github client secret")
	file := flag.String("file", "", "File containing the list of packages")
	output := flag.String("output", "gddofork.out", "Output file")
	progress := flag.Bool("progress", false, "Show a progress bar")
	flag.Parse()

	var auth *gddoexp.GithubAuth
	if *clientID != "" || *clientSecret != "" {
		if *clientID == "" || *clientSecret == "" {
			fmt.Println("to enable Github authentication, you need to provide both the id and the secret")
			flag.PrintDefaults()
			return
		}
		auth = &gddoexp.GithubAuth{
			ID:     *clientID,
			Secret: *clientSecret,
		}
	}

	var pkgs []database.Package
	var err error

	if *file != "" {
		pkgs, err = readFromFile(*file)
	} else {
		pkgs, err = readFromStdin()
	}
	if err != nil {
		fmt.Println(err)
		return
	}

	o, err := os.OpenFile(*output, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println("error creating output file:", err)
		return
	}
	defer o.Close()

	log.SetOutput(o)
	log.Println("BEGIN")
	log.Printf("%d packages will be analyzed", len(pkgs))

	var progressBar *pb.ProgressBar
	if *progress {
		progressBar = pb.StartNew(len(pkgs))
	}

	var cache int
	for response := range gddoexp.AreFastForkPackages(pkgs, auth) {
		if *progress {
			progressBar.Increment()
		}
		if response.Cache {
			cache++
		}
		if response.Error != nil {
			log.Println(response.Error)
		} else if response.FastFork {
			log.Printf("package “%s” is a fast fork\n", response.Path)
			if !*progress {
				fmt.Println(response.Path)
			}
		} else {
			log.Printf("package “%s” is not a fast fork\n", response.Path)
		}
	}

	if *progress {
		progressBar.Finish()
	}
	log.Println("Cache hits:", cache)
	log.Println("END")
}
func upload(cmd *cobra.Command, args []string) error {
	if len(args) != 1 {
		cmd.Help()
		return nil
	}

	if concurrencyFlag > probesFlag {
		concurrencyFlag = probesFlag
	}
	if concurrencyFlag == 0 {
		concurrencyFlag++
	}

	token, err := getToken()
	if err != nil {
		log.Error(err)
		return err
	}

	var fns []string
	if cernDistributionFlag {
		vals, err := createCERNDistribution()
		if err != nil {
			return err
		}
		fns = vals
	} else {
		fd, err := createFile(fmt.Sprintf("testfile-manual-count-%d-bs-%d", countFlag, bsFlag), "1", countFlag, bsFlag)
		if err != nil {
			return err
		}
		fns = []string{fd.Name()}
		fd.Close()
	}
	defer func() {
		for _, v := range fns {
			os.RemoveAll(v)
		}
	}()

	benchStart := time.Now()
	total := 0
	errorProbes := 0
	errChan := make(chan error)
	resChan := make(chan string)
	doneChan := make(chan bool)
	limitChan := make(chan int, concurrencyFlag)

	for i := 0; i < concurrencyFlag; i++ {
		limitChan <- 1
	}

	var bar *pb.ProgressBar
	if progressBar {
		fmt.Printf("There are %d possible files to upload\n", len(fns))
		bar = pb.StartNew(probesFlag)
	}

	// Seed once; re-seeding on every iteration weakens the randomness.
	rand.Seed(time.Now().UnixNano())
	for i := 0; i < probesFlag; i++ {
		filename := fns[rand.Intn(len(fns))]
		go func(fn string) {
			<-limitChan
			defer func() {
				limitChan <- 1
			}()

			// Open the file again; the PUT will close the fd.
			lfd, err := os.Open(fn)
			if err != nil {
				errChan <- err
				return
			}
			defer lfd.Close()

			// A shared client would reuse connections; here each probe
			// gets its own client.
			c := &http.Client{}

			target := args[0]
			if randomTargetFlag {
				rawUUID, err := uuid.NewV4()
				if err != nil {
					errChan <- err
					return
				}
				target += rawUUID.String()
			}

			req, err := http.NewRequest("PUT", dataAddr+target, lfd)
			if err != nil {
				errChan <- err
				return
			}
			req.Header.Add("Content-Type", "application/octet-stream")
			req.Header.Add("Authorization", "Bearer "+token)
			req.Header.Add("CIO-Checksum", checksumFlag)

			res, err := c.Do(req)
			if err != nil {
				errChan <- err
				return
			}
			if err := res.Body.Close(); err != nil {
				errChan <- err
				return
			}
			if res.StatusCode != 201 {
				errChan <- fmt.Errorf("request failed with status code %d", res.StatusCode)
				return
			}
			doneChan <- true
			resChan <- ""
		}(filename)
	}

	for {
		select {
		case <-doneChan:
			total++
			if progressBar {
				bar.Increment()
			}
		case <-resChan:
		case err := <-errChan:
			log.Error(err)
			errorProbes++
			total++
			if progressBar {
				bar.Increment()
			}
		}
		if total == probesFlag {
			break
		}
	}
	if progressBar {
		bar.Finish()
	}

	numberRequests := probesFlag
	concurrency := concurrencyFlag
	totalTime := time.Since(benchStart).Seconds()
	failedRequests := errorProbes
	frequency := float64(numberRequests-failedRequests) / totalTime
	period := 1 / frequency
	volume := numberRequests * countFlag * bsFlag / 1024 / 1024 // approximate MiB uploaded
	throughput := float64(volume) / totalTime

	data := [][]string{
		{"#NUMBER", "CONCURRENCY", "TIME", "FAILED", "FREQ", "PERIOD", "VOLUME", "THROUGHPUT"},
		{fmt.Sprintf("%d", numberRequests),
			fmt.Sprintf("%d", concurrency),
			fmt.Sprintf("%f", totalTime),
			fmt.Sprintf("%d", failedRequests),
			fmt.Sprintf("%f", frequency),
			fmt.Sprintf("%f", period),
			fmt.Sprintf("%d", volume),
			fmt.Sprintf("%f", throughput)},
	}

	w := csv.NewWriter(output)
	w.Comma = ' '
	for _, d := range data {
		if err := w.Write(d); err != nil {
			return err
		}
	}
	w.Flush()
	return w.Error()
}
// scanWords reads the word list, assigning each unique word an id and
// recording, for every tuple (partial), which word ids contain it.
// With makeLookup set it builds the reverse lookup maps instead.
func scanWords(wordpath string, tupleLength int, makeLookup bool) (words map[string]int, tuples map[string]string, wordsLookup map[int]string, tuplesLookup map[string][]int) {
	totalLines := lineCount(wordpath)
	inFile, _ := os.Open(wordpath)
	defer inFile.Close()
	scanner := bufio.NewScanner(inFile)
	scanner.Split(bufio.ScanLines)

	// Initialize.
	words = make(map[string]int)
	tuples = make(map[string]string)
	wordsLookup = make(map[int]string)
	tuplesLookup = make(map[string][]int)
	numTuples := 0
	numWords := 0
	lineNum := 0

	var bar *pb.ProgressBar
	if VERBOSE {
		fmt.Println("Parsing subsets...")
		bar = pb.StartNew(totalLines)
	}
	for scanner.Scan() {
		if VERBOSE {
			bar.Increment()
		}
		lineNum++
		s := strings.TrimSpace(scanner.Text())
		if _, ok := words[s]; !ok {
			if makeLookup {
				wordsLookup[numWords] = s
			} else {
				words[s] = numWords
			}
			partials := getPartials(s, tupleLength)
			for i := 0; i < len(partials); i++ {
				_, ok := tuples[partials[i]]
				if makeLookup {
					_, ok = tuplesLookup[partials[i]]
				}
				if !ok {
					if makeLookup {
						tuplesLookup[partials[i]] = []int{numWords}
					} else {
						tuples[partials[i]] = strconv.Itoa(numWords)
					}
					numTuples++
				} else {
					if makeLookup {
						tuplesLookup[partials[i]] = append(tuplesLookup[partials[i]], numWords)
					} else {
						tuples[partials[i]] += " " + strconv.Itoa(numWords)
					}
				}
			}
			numWords++
		}
	}
	if VERBOSE {
		bar.FinishPrint("Finished parsing subsets")
	}
	return
}
func main() {
	kingpin.CommandLine.HelpFlag.Short('h')
	kingpin.Parse()

	*downloadFlag = !*downloadFlag

	w := new(tabwriter.Writer)
	var output io.Writer
	if !*confirmFlag {
		var err error
		LogFileLocation := flag.String("log", "BotSifter.log", "Specifies path of the log file")
		output, err = os.OpenFile(*LogFileLocation, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
		if err != nil {
			log.Println(err)
			return
		}
	} else {
		log.SetFlags(0)
	}
	if output == nil {
		output = os.Stdout
	}
	w.Init(output, 4, 4, 2, ' ', 0)
	log.SetOutput(w)

	// Read config file.
	var GooFig GoogleConfig
	data, err := ioutil.ReadFile(*configFile)
	if err != nil {
		fmt.Println("Unable to open configuration file: " + *configFile)
		return
	}
	// Load config data from file into struct.
	err = yaml.Unmarshal(data, &GooFig)
	if err != nil {
		log.Println(err)
		return
	}

	fmt.Println("\nConfig File: \t\t[" + *configFile + "]")
	fmt.Println("Include Referrers File: [" + GooFig.RefWhite + "]")
	fmt.Println("Exclude Referrers File: [" + GooFig.RefBlack + "]")
	fmt.Println("Include UA File: \t[" + GooFig.UAWhite + "]")
	fmt.Println("Exclude UA File: \t[" + GooFig.UABlack + "]")
	fmt.Println("Exclude Hostname File: \t[" + GooFig.HostInc + "]")

	// Loading variables from config struct.
	WebPropertyId := GooFig.UACode
	AccountID := WebPropertyId[3:11]

	// Authentication settings.
	conf := &oauth2.Config{
		ClientID:     GooFig.ClientID,
		ClientSecret: GooFig.ClientSecret,
		RedirectURL:  GooFig.RedirectURL,
		Scopes: []string{
			"https://www.googleapis.com/auth/analytics",
			"https://www.googleapis.com/auth/analytics.edit",
			"https://www.googleapis.com/auth/analytics.manage.users",
		},
		Endpoint: google.Endpoint,
	}

	// If the config data doesn't contain an auth token we need to get one.
	fmt.Println("")
	if GooFig.Token == nil {
		fmt.Print("Authenticating user...")
		GooFig.Token = auth(conf, GooFig.Port)
		fmt.Println("\t\t\t\t\tCompleted")
	}

	// Load new client and service to talk with the Google API.
	fmt.Print("Setting up Google client...")
	client := conf.Client(oauth2.NoContext, GooFig.Token)
	service, err := analytics.New(client)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println("\t\t\t\t\tCompleted")

	// Retrieve BotSifter list from server if the cleanFlag is false.
	var resp Response
	var respDisplay Response

	// Load csv files into appropriate structs.
	fmt.Print("Loading includes, excludes and hostname lists...")
	uainc := ReadReferrerList(GooFig.UAWhite)
	uaexc := ReadReferrerList(GooFig.UABlack)
	refs := ReadReferrerList(GooFig.RefWhite)
	excs := ReadReferrerList(GooFig.RefBlack)
	hosts := ReadHostList(GooFig.HostInc)
	fmt.Println("\t\tCompleted")

	if !*cleanFlag {
		if *downloadFlag {
			fmt.Print("Downloading BotSifter Referrer List...")
			resp = retreiveList(GooFig.Person)
			respDisplay = resp
			fmt.Println("\t\t\t\tCompleted")
		}
		if resp.Status == "Unauthorized" {
			fmt.Println("Download failed: Invalid username/password")
			return
		}

		// Append contents from includeList.csv onto the ReferrerList
		// struct and remove duplicate entries.
		fmt.Print("Merging local include data with BotSifter data...")
		resp.ReferrerList = append(resp.ReferrerList, refs...)
		resp.UserAgentList = append(resp.UserAgentList, uainc...)
		RemoveDuplicates(&resp.UserAgentList)
		RemoveDuplicates(&resp.ReferrerList)
		fmt.Println("\t\tCompleted")

		// Remove entries that appear in the exclude lists.
		fmt.Print("Removing local exclude data from BotSifter data...")
		resultsRef := []Referrer{}
		for _, compFilter := range resp.ReferrerList {
			found := false
			for _, exc := range excs {
				if compFilter.Referrer == exc.Referrer {
					found = true
				}
			}
			if !found {
				resultsRef = append(resultsRef, compFilter)
			}
		}
		resp.ReferrerList = resultsRef

		resultsUA := []Referrer{}
		for _, compFilter := range resp.UserAgentList {
			found := false
			for _, exc := range uaexc {
				if compFilter.Referrer == exc.Referrer {
					found = true
				}
			}
			if !found {
				resultsUA = append(resultsUA, compFilter)
			}
		}
		resp.UserAgentList = resultsUA
		fmt.Println("\t\tCompleted")
	}

	fmt.Print("Download current BotSifter filters to build comparison lists...")
	// List current BotSifter filters in the GA account.
	filters, err := service.Management.Filters.List(AccountID).Do()
	if err != nil {
		log.Fatalln(err)
	}

	var oldFilterListUA []Referrer
	var oldFilterListRef []Referrer
	var oldFilterListHost []Host
	for _, oldFilter := range filters.Items {
		if strings.Contains(oldFilter.Name, "BotSifter UA") {
			if filterExpression := oldFilter.ExcludeDetails; filterExpression != nil {
				filterExpression.ExpressionValue = strings.Replace(filterExpression.ExpressionValue, "\\.", ".", -1)
				filterExpression.ExpressionValue = strings.Replace(filterExpression.ExpressionValue, "\\+", "+", -1)
				for _, ref := range strings.Split(filterExpression.ExpressionValue, "|") {
					oldFilterListUA = append(oldFilterListUA, Referrer{ref, "", ""})
				}
			}
		}
		if strings.Contains(oldFilter.Name, "BotSifter Ref") {
			if filterExpression := oldFilter.ExcludeDetails; filterExpression != nil {
				filterExpression.ExpressionValue = strings.Replace(filterExpression.ExpressionValue, "\\.", ".", -1)
				filterExpression.ExpressionValue = strings.Replace(filterExpression.ExpressionValue, "\\+", "+", -1)
				for _, ref := range strings.Split(filterExpression.ExpressionValue, "|") {
					oldFilterListRef = append(oldFilterListRef, Referrer{ref, "", ""})
				}
			}
		}
		if strings.Contains(oldFilter.Name, "BotSifter Hostname") {
			if filterExpression := oldFilter.IncludeDetails; filterExpression != nil {
				filterExpression.ExpressionValue = strings.Replace(filterExpression.ExpressionValue, "\\.", ".", -1)
				filterExpression.ExpressionValue = strings.Replace(filterExpression.ExpressionValue, "\\+", "+", -1)
				for _, ref := range strings.Split(filterExpression.ExpressionValue, "|") {
					oldFilterListHost = append(oldFilterListHost, Host{ref})
				}
			}
		}
	}

	onlyInNewListRefs := Referrers(resp.ReferrerList).findEntriesOnlyInLeftSide(oldFilterListRef)
	onlyInOldListRefs := Referrers(oldFilterListRef).findEntriesOnlyInLeftSide(resp.ReferrerList)
	inBothListsRefs := Referrers(resp.ReferrerList).findInBoth(oldFilterListRef)
	onlyInNewListUAs := Referrers(resp.UserAgentList).findEntriesOnlyInLeftSide(oldFilterListUA)
	onlyInOldListUAs := Referrers(oldFilterListUA).findEntriesOnlyInLeftSide(resp.UserAgentList)
	inBothListsUAs := Referrers(resp.UserAgentList).findInBoth(oldFilterListUA)
	onlyInNewListHosts := Hosts(hosts).findEntriesOnlyInLeftSide(oldFilterListHost)
	onlyInOldListHosts := Hosts(oldFilterListHost).findEntriesOnlyInLeftSide(hosts)
	inBothListsHosts := Hosts(hosts).findInBoth(oldFilterListHost)

	// Escape dots and plus signs for the GA filter expressions.
	var Ref Referrer
	resultsRef := []Referrer{}
	for _, Ref = range resp.ReferrerList {
		Ref.Referrer = strings.Replace(Ref.Referrer, ".", "\\.", -1)
		Ref.Referrer = strings.Replace(Ref.Referrer, "+", "\\+", -1)
		resultsRef = append(resultsRef, Ref)
	}
	resp.ReferrerList = resultsRef

	resultsUA := []Referrer{}
	for _, Ref = range resp.UserAgentList {
		Ref.Referrer = strings.Replace(Ref.Referrer, ".", "\\.", -1)
		Ref.Referrer = strings.Replace(Ref.Referrer, "+", "\\+", -1)
		resultsUA = append(resultsUA, Ref)
	}
	resp.UserAgentList = resultsUA

	resultsHost := []Host{}
	for _, h := range hosts {
		h.Hostname = strings.Replace(h.Hostname, ".", "\\.", -1)
		h.Hostname = strings.Replace(h.Hostname, "+", "\\+", -1)
		resultsHost = append(resultsHost, h)
	}
	hosts = resultsHost
	fmt.Println("\tCompleted")
	fmt.Println("")

	log.Println("Current BotSifter Bots:")
	log.Println("\n#################### CURRENT BotSifter BOTS ####################")
	log.Println("Referrers:\n")
	log.Println("\tRANK\tNAME\tSCORE")
	for _, Ref = range respDisplay.ReferrerList {
		log.Println(Ref)
	}
	log.Println("")
	log.Println("User Agents:\n")
	log.Println("\tRANK\tNAME\tSCORE")
	for _, Ref = range respDisplay.UserAgentList {
		log.Println(Ref)
	}
	log.Println("")

	log.Println("\nBotSifter will make the following changes to your GA Account[" + GooFig.UACode + "]:")
	log.Println("\n#################### HOST CHANGES ####################")
	log.Println("Added Hosts:\n")
	if onlyInNewListHosts != nil {
		sort.Sort(onlyInNewListHosts)
		for _, h := range onlyInNewListHosts {
			log.Println(h)
		}
	} else {
		log.Println("\tNONE")
	}
	log.Println("")
	log.Println("Removed Hosts:\n")
	if onlyInOldListHosts != nil {
		sort.Sort(onlyInOldListHosts)
		for _, h := range onlyInOldListHosts {
			log.Println(h)
		}
	} else {
		log.Println("\tNONE")
	}
	log.Println("")
	log.Println("Hosts unchanged:\n")
	if inBothListsHosts != nil {
		sort.Sort(inBothListsHosts)
		for _, h := range inBothListsHosts {
			log.Println(h)
		}
	} else {
		log.Println("\tNONE")
	}

	log.Println("\n#################### REFERRER CHANGES ####################")
	log.Println("Added Referrers:\n")
	if onlyInNewListRefs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(onlyInNewListRefs)
		for _, Ref = range onlyInNewListRefs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	log.Println("")
	log.Println("Removed Referrers:\n")
	if onlyInOldListRefs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(onlyInOldListRefs)
		for _, Ref = range onlyInOldListRefs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	log.Println("")
	log.Println("Referrers unchanged:\n")
	if inBothListsRefs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(inBothListsRefs)
		for _, Ref = range inBothListsRefs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}

	log.Println("\n#################### USER AGENTS CHANGES ####################")
	log.Println("Added User Agents:\n")
	if onlyInNewListUAs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(onlyInNewListUAs)
		for _, Ref = range onlyInNewListUAs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	log.Println("")
	log.Println("Removed User Agents:\n")
	if onlyInOldListUAs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(onlyInOldListUAs)
		for _, Ref = range onlyInOldListUAs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	log.Println("")
	log.Println("User Agents unchanged:\n")
	if inBothListsUAs != nil {
		log.Println("\tRANK\tNAME\tSCORE")
		sort.Sort(inBothListsUAs)
		for _, Ref = range inBothListsUAs {
			log.Println(Ref)
		}
	} else {
		log.Println("\tNONE")
	}
	w.Flush()
	log.Println("")

	if !*confirmFlag {
		length := len(filters.Items)
		var bar *pb.ProgressBar
		if length != 0 {
			bar = pb.StartNew(length)
			bar.SetMaxWidth(80)
			fmt.Println("Deleting old BotSifter filters ")
			for _, eachFilter := range filters.Items {
				if strings.Contains(eachFilter.Name, "BotSifter") {
					service.Management.Filters.Delete(AccountID, eachFilter.Id).Do()
				}
				bar.Increment()
				time.Sleep(time.Millisecond * 250)
			}
			bar.Finish()
		} else {
			fmt.Println("No filters to delete")
		}

		// If cleanFlag was given then end the program here.
		if *cleanFlag {
			return
		}

		// If view is not defined in the config file then ask the user
		// which one to apply filters to.
		if GooFig.View == "" {
			// List all views.
			profiles, err := service.Management.Profiles.List(AccountID, WebPropertyId).Do()
			if err != nil {
				log.Println(err)
			}
			for i, profile := range profiles.Items {
				fmt.Printf("%d. %s\n", i, profile.Name)
			}
			reader := bufio.NewReader(os.Stdin)
			fmt.Printf("Please select a profile to apply filters to: ")
			index := 0
			for {
				selectedProfileIndex, _ := reader.ReadString('\n')
				index, err = strconv.Atoi(strings.TrimSuffix(selectedProfileIndex, "\n"))
				if err == nil && index < len(profiles.Items) {
					break
				}
				fmt.Println("Invalid input", index, err)
			}
			GooFig.View = profiles.Items[index].Id
		}

		// Prepare filters.
		fmt.Println("Preparing Filter - combining multiple Referrers")
		filterList := RefMerge(resp.ReferrerList, true)

		// Build new filters from the ReferrerList struct.
		fmt.Println("Creating referral filters")
		var FilterIds []string
		length = len(filterList)
		bar = pb.StartNew(length)
		bar.SetMaxWidth(80)
		for i, newFilter := range filterList {
			counter := strconv.Itoa(i + 1)
			filter := &analytics.Filter{
				Name: "BotSifter Ref Spam" + counter,
				Type: "EXCLUDE",
				ExcludeDetails: &analytics.FilterExpression{
					Field:           "REFERRAL",
					ExpressionValue: newFilter,
					CaseSensitive:   false,
				},
			}
			filter, err = service.Management.Filters.Insert(AccountID, filter).Do()
			if err != nil {
				fmt.Print("\n")
				fmt.Println(err)
				return
			}
			// Save filter ids for later.
			FilterIds = append(FilterIds, filter.Id)
			bar.Increment()
			time.Sleep(time.Millisecond * 250)
		}
		bar.Finish()

		// Prepare filters.
		fmt.Println("Preparing Filter - combining multiple User Agents")
		filterListua := RefMerge(resp.UserAgentList, true)

		// Build new filters from the UserAgentList struct.
		fmt.Println("Creating User Agent filters")
		length = len(filterListua)
		bar = pb.StartNew(length)
		bar.SetMaxWidth(80)
		for i, newFilter := range filterListua {
			counter := strconv.Itoa(i + 1)
			filter := &analytics.Filter{
				Name: "BotSifter UA Spam" + counter,
				Type: "EXCLUDE",
				ExcludeDetails: &analytics.FilterExpression{
					Field:           "USER_DEFINED_VALUE",
					ExpressionValue: newFilter,
					CaseSensitive:   false,
				},
			}
			filter, err = service.Management.Filters.Insert(AccountID, filter).Do()
			if err != nil {
				fmt.Print("\n")
				fmt.Println(err)
				return
			}
			// Save filter ids for later.
			FilterIds = append(FilterIds, filter.Id)
			bar.Increment()
			time.Sleep(time.Millisecond * 250)
		}
		bar.Finish()

		if len(hosts) != 0 {
			hostList := HostMerge(hosts, false)

			// If there are hosts, build "include Hostname" rule(s).
			fmt.Println("Creating Hostname filter(s)")
			length = len(hostList)
			bar = pb.StartNew(length)
			bar.SetMaxWidth(80)
			for i, newHost := range hostList {
				counter := strconv.Itoa(i)
				filter := &analytics.Filter{
					Name: "BotSifter Hostname Spam" + counter,
					Type: "INCLUDE",
					IncludeDetails: &analytics.FilterExpression{
						Field:           "PAGE_HOSTNAME",
						ExpressionValue: newHost,
						CaseSensitive:   false,
					},
				}
				filter, err = service.Management.Filters.Insert(AccountID, filter).Do()
				if err != nil {
					log.Println(err)
					return
				}
				// Save filter ids for later.
				FilterIds = append(FilterIds, filter.Id)
				bar.Increment()
				time.Sleep(time.Millisecond * 250)
			}
			bar.Finish()
		}

		// Connect the built filters to the profile the user selected.
		fmt.Println("Connecting filters to profile")
		length = len(FilterIds)
		bar = pb.StartNew(length)
		bar.SetMaxWidth(80)
		for _, newLink := range FilterIds {
			profilefilterlink := &analytics.ProfileFilterLink{
				FilterRef: &analytics.FilterRef{Id: newLink},
			}
			_, err := service.Management.ProfileFilterLinks.Insert(AccountID, WebPropertyId, GooFig.View, profilefilterlink).Do()
			if err != nil {
				log.Println("Error Connecting Filter to View")
			}
			bar.Increment()
			time.Sleep(time.Millisecond * 250)
		}
		bar.Finish()
	}

	fmt.Println("Saving configuration data to " + *configFile)
	// Marshal data to save into the config file.
	data, err = yaml.Marshal(&GooFig)
	if err != nil {
		log.Println(err)
		return
	}
	// Write config file.
	err = ioutil.WriteFile(*configFile, data, 0644)
	if err != nil {
		log.Println(err)
		return
	}
	fmt.Println("Completed")
}
func stat(cmd *cobra.Command, args []string) error {
	if len(args) != 1 {
		cmd.Help()
		return nil
	}

	token, err := getToken()
	if err != nil {
		return err
	}

	con, err := grpc.Dial(metaAddr, grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer con.Close()
	c := pb.NewMetaClient(con)

	benchStart := time.Now()
	total := 0
	errorProbes := 0
	errChan := make(chan error)
	resChan := make(chan string)
	doneChan := make(chan bool)
	limitChan := make(chan int, concurrencyFlag)

	for i := 0; i < concurrencyFlag; i++ {
		limitChan <- 1
	}

	var bar *br.ProgressBar
	if progressBar {
		bar = br.StartNew(probesFlag)
	}

	for i := 0; i < probesFlag; i++ {
		go func() {
			<-limitChan
			defer func() {
				limitChan <- 1
			}()

			in := &pb.StatReq{}
			in.AccessToken = token
			in.Path = args[0]
			in.Children = childrenFlag

			ctx := context.Background()
			if _, err := c.Stat(ctx, in); err != nil {
				errChan <- err
				return
			}
			doneChan <- true
			resChan <- ""
		}()
	}

	for {
		select {
		case <-doneChan:
			total++
			if progressBar {
				bar.Increment()
			}
		case <-resChan:
		case err := <-errChan:
			log.Error(err)
			errorProbes++
			total++
			if progressBar {
				bar.Increment()
			}
		}
		if total == probesFlag {
			break
		}
	}
	if progressBar {
		bar.Finish()
	}

	numberRequests := probesFlag
	concurrency := concurrencyFlag
	totalTime := time.Since(benchStart).Seconds()
	failedRequests := errorProbes
	frequency := float64(numberRequests-failedRequests) / totalTime
	period := 1 / frequency

	data := [][]string{
		{"#NUMBER", "CONCURRENCY", "TIME", "FAILED", "FREQ", "PERIOD"},
		{fmt.Sprintf("%d", numberRequests),
			fmt.Sprintf("%d", concurrency),
			fmt.Sprintf("%f", totalTime),
			fmt.Sprintf("%d", failedRequests),
			fmt.Sprintf("%f", frequency),
			fmt.Sprintf("%f", period)},
	}

	w := csv.NewWriter(output)
	w.Comma = ' '
	for _, d := range data {
		if err := w.Write(d); err != nil {
			return err
		}
	}
	w.Flush()
	return w.Error()
}
// Given a file on the local disk, synchronize it with Google Drive: if the
// corresponding file doesn't exist on Drive, it's created; if it exists
// but has different contents, the contents are updated. The Unix
// permissions and file modification time on Drive are also updated
// appropriately.
func syncFileUp(localPath string, stat os.FileInfo, drivePath string, encrypt bool, progress *pb.ProgressBar) error {
	debug.Printf("syncFileUp: %s -> %s", localPath, drivePath)

	// Get the *drive.File for the folder to create the new file in.
	// This folder should definitely exist at this point, since we
	// create all folders needed before starting to upload files.
	parentFolder, err := gd.GetFile(filepath.Dir(drivePath))
	if err != nil {
		panic(fmt.Sprintf("%s: get parent directory: %s", filepath.Dir(drivePath), err))
	}

	baseName := filepath.Base(drivePath)
	var driveFile *gdrive.File

	if stat.IsDir() {
		// We only get here if the folder doesn't exist at all on Drive; if
		// it already exists, we updated the metadata earlier (in
		// fileNeedsUpload) and don't go through this path.
		var proplist []gdrive.Property
		proplist = append(proplist, gdrive.Property{Key: "Permissions",
			Value: fmt.Sprintf("%#o", stat.Mode()&os.ModePerm)})
		driveFile, err = gd.CreateFolder(baseName, parentFolder, normalizeModTime(stat.ModTime()), proplist)
		checkFatalError(err, fmt.Sprintf("%s: create folder", drivePath))

		if progress != nil {
			progress.Increment()
		}
		atomic.AddInt64(&stats.UploadBytes, stat.Size())
		verbose.Printf("Created Google Drive folder %s", drivePath)
	} else {
		// We're uploading a file. Create an empty file on Google Drive if
		// it doesn't already exist.
		if driveFile, err = gd.GetFile(drivePath); err == gdrive.ErrNotExist {
			debug.Printf("%s doesn't exist on Drive. Creating", drivePath)

			var proplist []gdrive.Property
			if encrypt {
				// Compute a unique IV for the file.
				iv := getRandomBytes(aes.BlockSize)
				ivhex := hex.EncodeToString(iv)
				proplist = append(proplist, gdrive.Property{Key: "IV", Value: ivhex})
			}
			proplist = append(proplist, gdrive.Property{Key: "Permissions",
				Value: fmt.Sprintf("%#o", stat.Mode()&os.ModePerm)})
			// We explicitly set the modification time of the file to the
			// start of the Unix epoch, so that if the upload fails
			// partway through, then we won't later be confused about which
			// file is the correct one from having local and Drive copies
			// with the same time but different contents.
			driveFile, err = gd.CreateFile(baseName, parentFolder, time.Unix(0, 0), proplist)
			if err != nil {
				return err
			}
		} else if err != nil {
			// Don't proceed with a nil driveFile on other lookup errors.
			return err
		}

		// And now upload the contents of the file, either overwriting the
		// contents of the existing file, or adding contents to the
		// just-created file.
		if err = uploadFileContents(localPath, driveFile, encrypt, progress); err != nil {
			return err
		}
	}

	verbose.Printf("Updated local %s -> Google Drive %s", localPath, drivePath)

	// Only update the modification time on Google Drive to match the local
	// modification time after the upload has finished successfully.
	return gd.UpdateModificationTime(driveFile, normalizeModTime(stat.ModTime()))
}
func main() {
	clientID := flag.String("id", "", "Github client ID")
	clientSecret := flag.String("secret", "", "Github client secret")
	output := flag.String("output", "gddoexp.out", "Output file")
	progress := flag.Bool("progress", false, "Show a progress bar")
	flag.Parse()

	var auth *gddoexp.GithubAuth
	if *clientID != "" || *clientSecret != "" {
		if *clientID == "" || *clientSecret == "" {
			fmt.Println("to enable Github authentication, you need to provide both the id and the secret")
			flag.PrintDefaults()
			return
		}
		auth = &gddoexp.GithubAuth{
			ID:     *clientID,
			Secret: *clientSecret,
		}
	}

	// Add a cache to avoid repeated requests to Github.
	gddoexp.HTTPClient = &http.Client{
		Transport: httpcache.NewTransport(
			diskcache.New(path.Join(os.Getenv("HOME"), ".gddoexp")),
		),
	}

	db, err := database.New()
	if err != nil {
		fmt.Println("error connecting to database:", err)
		return
	}

	pkgs, err := db.AllPackages()
	if err != nil {
		fmt.Println("error retrieving all packages:", err)
		return
	}

	file, err := os.OpenFile(*output, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println("error creating output file:", err)
		return
	}
	defer file.Close()

	log.SetOutput(file)
	log.Println("BEGIN")
	log.Printf("%d packages will be analyzed", len(pkgs))

	var progressBar *pb.ProgressBar
	if *progress {
		progressBar = pb.StartNew(len(pkgs))
	}

	var cache int
	for response := range gddoexp.ShouldSuppressPackages(pkgs, db, auth) {
		if *progress {
			progressBar.Increment()
		}
		if response.Cache {
			cache++
		}
		if response.Error != nil {
			log.Println(response.Error)
		} else if response.Suppress {
			log.Printf("package “%s” should be suppressed\n", response.Package.Path)
			if !*progress {
				fmt.Println(response.Package.Path)
			}
		}
	}

	if *progress {
		progressBar.Finish()
	}
	log.Println("Cache hits:", cache)
	log.Println("END")
}