Example #1
func HoldAuctionsFor(client types.RepPoolClient, instances []instance.Instance, representatives []string, rules types.AuctionRules, communicator types.AuctionCommunicator) ([]types.AuctionResult, time.Duration) {
	fmt.Printf("\nStarting Auctions\n\n")
	bar := pb.StartNew(len(instances))

	t := time.Now()
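	// the buffered channel below acts as a counting semaphore,
	// capping the number of auctions running at once at rules.MaxConcurrent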
	semaphore := make(chan bool, rules.MaxConcurrent)
	c := make(chan types.AuctionResult)
	for _, inst := range instances {
		go func(inst instance.Instance) {
			semaphore <- true
			c <- communicator(types.AuctionRequest{
				Instance: inst,
				RepGuids: representatives,
				Rules:    rules,
			})
			<-semaphore
		}(inst)
	}

	results := []types.AuctionResult{}
	for range instances {
		results = append(results, <-c)
		bar.Increment()
	}

	bar.Finish()

	return results, time.Since(t)
}
Example #2
func main() {
	// Init...
	cfg := Config{
		CommonConfig: app.CommonConfig{
			MetaSize: 8,
			TileSize: 256,
		},
	}

	app.App.Configure("Copy", &cfg)

	if cfg.Copy.Threads < 1 {
		cfg.Copy.Threads = 1
	}

	fromI, err := plugins.DefaultPluginStore.Create(cfg.Copy.From.Plugin, cfg.Copy.From.PluginConfig)
	if err != nil {
		log.Fatal(err)
	}
	from, ok := fromI.(gopnik.CachePluginInterface)
	if !ok {
		log.Fatal("Invalid cache plugin type")
	}

	toI, err := plugins.DefaultPluginStore.Create(cfg.Copy.To.Plugin, cfg.Copy.To.PluginConfig)
	if err != nil {
		log.Fatal(err)
	}
	to, ok := toI.(gopnik.CachePluginInterface)
	if !ok {
		log.Fatal("Invalid cache plugin type")
	}

	// Load plan...
	coords, err := loadPlanFile()
	if err != nil {
		log.Fatal(err)
	}

	// Process...
	bar := pb.StartNew(len(coords))
	var barMu sync.Mutex
	var wg sync.WaitGroup

	for k := 0; k < cfg.Copy.Threads; k++ {
		wg.Add(1)
		go func(k int) {
			defer wg.Done()
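			// walk the coordinate list with a stride of cfg.Copy.Threads,
			// so each worker handles a disjoint subset of the meta-tiles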
			for i := k; i < len(coords); i += cfg.Copy.Threads {
				copyMetaTile(coords[i], &cfg, from, to)

				barMu.Lock()
				bar.Increment()
				barMu.Unlock()
			}
		}(k)
	}
	wg.Wait()
	bar.FinishPrint("Done")
}
Example #3
File: game.go Project: Rompei/lgb
func (g *Game) getInitTweets(aliveNum int) {
	var progress *pb.ProgressBar
	if !g.debug {
		progress = pb.StartNew(aliveNum)
	}
	for y := 0; y < g.field.SizeY; y++ {
		for x := 0; x < g.field.SizeX; x++ {
			if g.field.Points[y][x].IsAlive {
				tweet := <-g.tweetCh
				if g.debug {
					emoji.Printf(":bird:Points[%v][%v]: %v\n", x, y, tweet)
				}
				g.field.Points[y][x].Str = tweet
				if !g.debug {
					progress.Increment()
				}
			}
		}
	}
	if g.debug {
		emoji.Println(":smile::smile::smile:Collected initial tweets:smile::smile::smile:")
	} else {
		e := emoji.Sprint(":smile::smile::smile:")
		progress.FinishPrint(e + "Collected initial tweets" + e)
	}
}
Example #4
func HostMerge(List []Host, ShowBar bool) []string {
	count := 0
	filterList := []string{""}
	length := len(List)
	var bar *pb.ProgressBar
	if ShowBar {
		bar = pb.StartNew(length)
		bar.SetMaxWidth(80)
	}
	for _, Host := range List {
		length = len(filterList[count])
		if length == 0 {
			filterList[count] = Host.Hostname
		} else if length+Host.length() <= 255 && length != 0 {
			filterList[count] += "|"
			filterList[count] += Host.Hostname
		} else {
			count++
			filterList = append(filterList, Host.Hostname)
			// filterList[count] = Ref.Referrer
		}
		if ShowBar {
			bar.Increment()
			time.Sleep(time.Millisecond * 50)
		}
	}
	if ShowBar {
		bar.Finish()
	}
	return filterList
}
Example #5
func doUpload(filelist []string) {
	length := len(filelist)

	if length == 0 {
		fmt.Println("No new tiles to upload")
		return
	}

	pb.BarStart = "["
	pb.BarEnd = "]"
	pb.Empty = " "
	pb.Current = "#"
	pb.CurrentN = ">"

	bar := pb.StartNew(length)

	c := initWorkers()
	go func() {
		for i := 0; i < length; i++ {
			uploadFile(filelist[i])
		}
	}()

	var total int
	for total < length {
		<-c
		total++
		bar.Increment()
	}

	bar.Finish()
}
Example #6
func main() {
	jobs := makeJobs(ipNetParsed, 100)
	out := make(chan ILOInfo, 100)
	ipNetLen := len(ipNetParsed)

	scanbar := pb.StartNew(ipNetLen)
	scanbar = scanbar.Prefix("Scan net")
	scanbar.ShowTimeLeft = false

	wg := new(sync.WaitGroup)
	// Start the workers
	for _, job := range jobs {
		wg.Add(1)
		go scan(job, out, scanbar, wg)
	}

	wg.Wait()
	close(out)

	ilo := []ILOInfo{}
	for info := range out {
		ilo = append(ilo, info)
	}
	scanbar.Finish()
	tableRender(ilo)
	fmt.Println("")
}
Example #7
func GenerateTestData(
	directory string,
	fileCount, rowCount int,
	timeOrigin int64, timeDistance int,
	uidCount int,
	domainsFilename string,
	geoOrigin util.GeoPoint, geoDistance int,
) (filenames []string, err error) {
	domains, err := newDomains(domainsFilename)
	if err != nil {
		return nil, err
	}
	filenames = make([]string, fileCount)
	progressBar := pb.StartNew(fileCount)
	for i := 0; i < fileCount; i++ {
		filename, err := generateTestDataFile(
			directory,
			rowCount,
			timeOrigin, timeDistance,
			uidCount,
			domains,
			geoOrigin, geoDistance,
		)
		if err != nil {
			return nil, err
		}
		progressBar.Increment()
		filenames[i] = filename
	}
	progressBar.FinishPrint("All done!")
	return filenames, nil
}
Example #8
func main() {
	// declare flags
	regionPtr := flag.String("region", "us-east-2", "AWS Region (default: us-east-2)")
	bucketPtr := flag.String("bucket", "backup", "Bucket path you wish to upload")
	srcPtr := flag.String("src", os.Getenv("PWD"), "src folder of files (default this folder)")
	keyPtr := flag.String("key", os.Getenv("AWS_ACCESS_KEY_ID"), "AWS_ACCESS_KEY_ID")
	secretPtr := flag.String("secret", os.Getenv("AWS_SECRET_ACCESS_KEY"), "AWS_SECRET_ACCESS_KEY")
	destPtr := flag.String("dest", "new", "Destination path of the root directory where you wish the contents to go in the bucket")
	limitPtr := flag.Int("limit", 4, "Number of concurrent uploads; if your connection keeps timing out, try lowering it")

	flag.BoolVar(&timeStamp, "timestamp", false, "append a Unix timestamp to the destination path")
	flag.BoolVar(&debug, "debug", false, "enable debug mode")

	flag.Parse()

	p := []string{
		*destPtr,
	}

	if timeStamp {
		p = append(p, strconv.FormatInt(time.Now().Unix(), 10))
	}

	dest := strings.Join(p, ".")
	fmt.Println(dest)
	simpleS3 := s5.New(*srcPtr, dest, *bucketPtr, *keyPtr, *secretPtr, *regionPtr, *limitPtr, debug)
	bar := pb.StartNew(simpleS3.FileCount)
	bar.ShowTimeLeft = false
	bar.Format("[⚡- ]")

	simpleS3.Run(func() {
		bar.Increment()
	})
	bar.FinishPrint(fmt.Sprintf("Completed Uploading to %s/%s!", simpleS3.Bucket, simpleS3.Dest))
}
Example #9
func (b *Boom) init() {
	if b.Client == nil {
		b.Client = &http.Client{}
	}
	b.results = make(chan *result, b.N)
	b.bar = pb.StartNew(b.N)
	b.start = time.Now()
}
Example #10
func ReadDB(c *client.Client, sdb, ddb, cmd string) client.BatchPoints {

	q := client.Query{
		Command:  cmd,
		Database: sdb,
	}

	//get type client.BatchPoints
	var batchpoints client.BatchPoints

	response, err := c.Query(q)
	if err != nil {
		fmt.Printf("Failed to get a response from the database, read error: %s\n", err.Error())
		return batchpoints
	}

	res := response.Results
	if len(res) == 0 {
		fmt.Printf("The database response is empty, read error!\n")
	} else {

		for k := range res {

			//show progress of reading series
			count := len(res[k].Series)
			bar := pb.StartNew(count)
			for _, ser := range res[k].Series {

				//get type client.Point
				var point client.Point

				point.Measurement = ser.Name
				point.Tags = ser.Tags
				for _, v := range ser.Values {
					point.Time, _ = time.Parse(time.RFC3339, v[0].(string))

					field := make(map[string]interface{})
					l := len(v)
					for i := 1; i < l; i++ {
						if v[i] != nil {
							field[ser.Columns[i]] = v[i]
						}
					}
					point.Fields = field
					point.Precision = "s"
					batchpoints.Points = append(batchpoints.Points, point)
				}
				bar.Increment()
				time.Sleep(3 * time.Millisecond)
			}
			bar.FinishPrint("Read series has finished!\n")
		}
		batchpoints.Database = ddb
		batchpoints.RetentionPolicy = "default"
	}
	return batchpoints
}
Example #11
func runDBTasks(ids []int64, task func(int64) error) {

	// Progress bar
	bar := pb.StartNew(len(ids))

	// Cancel control
	done := make(chan struct{})
	quit := false

	// IDs to process, sent via channel
	tasks := make(chan int64, len(ids)+1)

	var errs []error
	var mu sync.Mutex // guards quit and errs, which several workers may touch
	var wg sync.WaitGroup

	// Only fire up a set number of worker processes
	for i := 0; i < getGophers(len(ids)); i++ {
		wg.Add(1)

		go func() {
			for id := range tasks {
				err := doTask(id, task, done)
				if err != nil {
					mu.Lock()
					if !quit {
						close(done)
						quit = true
					}
					errs = append(
						errs,
						fmt.Errorf("Failed on ID %d : %+v", id, err),
					)
					mu.Unlock()
					break
				}
				bar.Increment()
			}
			wg.Done()
		}()
	}

	for _, id := range ids {
		tasks <- id
	}
	close(tasks)

	wg.Wait()
	if !quit {
		close(done)
	}

	if len(errs) == 0 {
		bar.Finish()
	}

	for _, err := range errs {
		handleErr(err)
	}
}
Example #12
func (c *ContentQueue) Wait() {
	finished := 0
	bar := pb.StartNew(len(c.items))
	for finished < len(c.items) {
		<-c.items[finished].Result
		finished++
		bar.Increment()
	}
	bar.Finish()
}
Example #13
func (b *Boom) init() {
	if b.Client == nil {
		tr := &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: b.AllowInsecure},
		}
		b.Client = &http.Client{Transport: tr}
	}
	b.results = make(chan *result, b.N)
	b.bar = pb.StartNew(b.N)
	b.start = time.Now()
}
Example #14
func main() {
	count := 6000
	bar := pb.StartNew(count)
	bar.ShowBar = true
	bar.ShowSpeed = true
	for i := 0; i < count; i++ {
		bar.Increment()
		time.Sleep(time.Millisecond)
	}
	bar.FinishPrint("The End!")
}
Example #15
func unzip(filename, dest, ver string) {
	var path string
	if filename == "" {
		fmt.Println("Can't unzip ", filename)
		os.Exit(1)
	}

	/* if filename[:2] == "go" {
		dest = dest + ps + strings.Replace(filename, "go", "go"+ps+ver, 1)
	} else if filename[:2] == "li" {
		dest = dest + ps + strings.Replace(filename, "liteide", "liteide"+ps+ver, 1)
	} */

	reader, err := zip.OpenReader(filename)
	checkErr("Extract error::OpenArchive", err)
	defer reader.Close()

	fl := len(reader.Reader.File)
	bar := pb.StartNew(fl) // StartNew already starts the bar, so no extra Start() call is needed
	bar.ShowPercent = true
	bar.ShowCounters = false
	bar.ShowTimeLeft = false
	bar.Prefix("Extracting " + filename[strings.LastIndex(filename, ps)+1:] + " ")
	for _, f := range reader.Reader.File {
		zipped, err := f.Open()
		checkErr("Extract error::", err)

		// path := filepath.Join(dest, ver, "./", f.Name)
		if f.Name[:2] == "go" {
			path = filepath.Join(dest, "./", strings.Replace(f.Name, "go", "go"+ps+ver, 1))
		} else if f.Name[:2] == "li" {
			path = filepath.Join(dest, "./", strings.Replace(f.Name, "liteide", "liteide"+ps+ver, 1))
		}
		// fmt.Println(path)

		if f.FileInfo().IsDir() {
			os.MkdirAll(path, f.Mode())
		} else {
			writer, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, f.Mode())
			checkErr("Extract error::OpenFileFromArchive", err)

			if _, err = io.Copy(writer, zipped); err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
			// close explicitly instead of deferring inside the loop, so handles don't pile up
			writer.Close()
		}
		zipped.Close()
		//  progress = (i / fl) * 100
		bar.Increment()
	}
	bar.Finish()
}
Example #16
func SetNecessaryExifData(
	osi OsInterface, exifi ExifInterface, filepairs FilePairs) FilePairs {
	bar := pb.StartNew(len(filepairs))
	for count := range filepairs {
		bar.Increment()
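		// turn an EXIF datetime such as "2006:01:02 15:04:05" into the short date "060102"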
		filepairs[count].DateTime = strings.Replace(strings.Split(
			*ExifGetDatetime(osi, exifi, filepairs[count].Raw),
			" ")[0][2:], ":", "", -1)
	}
	bar.Finish()
	return filepairs
}
Example #17
// TODO: File cookiejar
func main() {
	creds, err := loadCredentials()
	if err != nil {
		fmt.Printf("Failed to load credentials. (%s)\n", err)
		return
	}
	if len(os.Args) == 1 {
		fmt.Printf("Usage: %s <query>\n", os.Args[0])
		return
	}
	// Form a query by joining all the arguments.
	query := strings.Join(os.Args[1:], " ")

	client := megashares.New()
	// Attempt to login.
	for {
		if err := client.Login(creds.Username, creds.Password); err != nil {
			fmt.Printf("Failed to login: %s\n", err)
			creds = askForCredentials()
			// log.Fatalf("Couldn't login! Reason: %s\n", err)
		} else {
			break
		}
	}

	// Perform the search
	entries, _ := client.SearchEntries(query)

	// Print out the results of the search for the user to pick from.
	for i, entry := range entries {
		fmt.Fprintf(os.Stderr, "%d: %s\n", i, entry.Filename)
	}

	// Get a valid number to choose from from the input loop.
	// TODO: Allow for pagination by returning (choice, page).
	choice := getValidNumber(0, len(entries)-1)
	entry := entries[choice]

	fmt.Print(entry.Url)
	return
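	// NOTE: the early return above means the download block below is never reached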
	if file, response, err := ContinueDownload(client.Client, entry.Filename, entry.Url); err != nil {
		log.Fatal(err)
	} else {
		defer file.Close()
		defer response.Body.Close()
		length := response.ContentLength
		// Initialize progress bar.
		bar := pb.StartNew(int(length)).SetUnits(pb.U_BYTES)
		bar.ShowSpeed = true
		writer := io.MultiWriter(file, bar)
		io.Copy(writer, response.Body)
	}
}
Example #18
func main() {
	//defer profile.Start().Stop()
	kingpin.Version("0.0.1")
	kingpin.Parse()

	// check if config dir exists
	finfo, err := os.Stat(*confDir)
	if err != nil {
		log.Fatalf("Config directory %s does not exist\n", *confDir)
	}
	if !finfo.IsDir() {
		log.Fatalf("Config directory %s is not a directory\n", *confDir)
	}

	finfo, err = os.Stat(*templateDir)
	if err != nil {
		log.Printf("Config directory %s does not exist, try to fallback to templates/\n", *templateDir)
		*templateDir = "templates"
		finfo, err = os.Stat(*templateDir)
		if err != nil {
			log.Fatalf("Failed, no template directory found\n")
		}
	}
	templates = template.Must(template.ParseGlob(*templateDir + "/*.tmpl"))

	log.Printf("Create %d hosts with %d services each", *numHosts, *numServices)
	bar := pb.StartNew(*numHosts)
	// create a WaitGroup to make sure all templates are written before exiting
	var wg sync.WaitGroup
	concurrency := 20 // limit number of parallel goroutines
	sem := make(chan bool, concurrency)
	for host := 0; host < *numHosts; host++ {
		sem <- true
		// add us to the syncgroup
		wg.Add(1)
		go func() {
			// remove from syncgroup if done
			defer wg.Done()
			defer func() { <-sem }()
			genHost()
			bar.Increment()
		}()
	}
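	// refill the semaphore to capacity; this blocks until every in-flight goroutine has freed its slot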
	for i := 0; i < cap(sem); i++ {
		sem <- true
	}
	// exit if all subroutines are done
	wg.Wait()
	bar.FinishPrint("The End!")
}
Example #19
func main() {
	// fmt.Println("start")
	data := parseCsv(ListIp, Conf.Sep)
	bar := pb.StartNew(len(data))
	bar.ShowPercent = false
	for i := 0; i < len(data); i++ {
		bar.Increment()
		bar.Postfix(" " + data[i][0])

		if (len(data[i]) > 2 && len(data[i][1]) == 0) || (len(data[i]) == 1) || All {

			var line = make([]string, Conf.Length)
			ip := data[i][0]
			// fmt.Println(ip)
			line[Conf.SaveOrder["ip"]] = ip

			for k := 1; k < Conf.Length; k++ {
				line[k] = ""
			}
			for _, field := range Conf.Fields {
				jsonbody, err := request(ip, field.Url)
				if err != nil {
					fmt.Println(err.Error())
					continue
				}

				for key, filter := range field.Filter {
					// fmt.Println(filter)
					_, ret := search(jsonbody, filter, 0)
					// data[i][]
					// fmt.Println(ret)
					if len(filter.Split) > 0 {
						aret := regexp.MustCompile(filter.Split).Split(ret, -1)
						for j := 0; j < len(aret); j++ {
							line[Conf.SaveOrder[key+"_"+strconv.Itoa(j)]] = line[Conf.SaveOrder[key+"_"+strconv.Itoa(j)]] + strings.TrimSpace(aret[j])
						}
					} else {
						line[Conf.SaveOrder[key]] = line[Conf.SaveOrder[key]] + strings.TrimSpace(ret)
					}
				}
			}
			// fmt.Println(line)
			linesave(line, ip)
			// fmt.Println(ip)
			// os.Exit(0)
		}
	}
	bar.FinishPrint("End")
}
Example #20
func main() {
	targets := findTarget()
	bar = pb.StartNew(numOfSoldiers * numOfBattalions * len(targets))
	bar.Format("<.- >")
	var missionIssuedWg sync.WaitGroup
	for _, target := range targets {
		missionIssuedWg.Add(1)
		ht := hitRequest{}
		ht.Initialize()
		ht.url = target
		go deploy(ht, &missionIssuedWg)
	}
	missionIssuedWg.Wait()
	bar.FinishPrint("Victory!")
}
Example #21
func main() {

	var progress *pb.ProgressBar

	page := 0
	category := "bugs"

	for {

		printf("Get page: %d", page)
		tickets, _, err := sfClient.Tracker.Info(category)

		if err != nil {
			log.Fatal(err)
		}

		if ghMilestones == nil {
			createMilestones(tickets)
			getMilestones()
		}

		if progress == nil {
			log.Println("Creating tickets")
			progress = pb.StartNew(tickets.Count)
		}

		if len(tickets.Tickets) == 0 {
			break
		}

		for _, ticket := range tickets.Tickets {
			ticket, _, err := sfClient.Tracker.Get(category, ticket.TicketNum)

			if err != nil {
				log.Fatal(err)
			}

			sfTicketToGhIssue(ticket, category)

			progress.Increment()
			time.Sleep(time.Millisecond * sleepTime)
		}

		page += 1
	}

	progress.FinishPrint("All tickets imported")
}
Example #22
File: pi.go Project: marktai/math
func SpawnWorkers(terms, start, chunk int64, workers int) float64 {
	cur := start
	workersSpawned := 0
	workersToSpawn := int(math.Ceil(float64(terms) / float64(chunk)))
	outChan := make(chan float64, workers)
	quit := make(chan int, 1)

	if workers > workersToSpawn {
		workers = workersToSpawn
	}

	bar := pb.StartNew(workersToSpawn)

	for i := 0; i < workers; i++ {
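		// seed one zero result per worker; the select loop below treats each received value
		// as a free slot and dispatches the next chunk to a new goroutine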
		outChan <- float64(0)
	}

	var total float64

	for breaker := false; !breaker; {
		select {
		case result := <-outChan:

			total += result

			go func(start, chunk int64) {
				result := worker(start, chunk)
				bar.Increment()
				outChan <- result
			}(cur, chunk)

			workersSpawned += 1
			cur += chunk * 2

			if workersSpawned >= workersToSpawn {
				quit <- 0
			}
		case <-quit:
			breaker = true
		}
	}

	for i := 0; i < workers; i++ {
		total += <-outChan
	}

	return total * 4
}
Example #23
func main() {

	if len(os.Args) < 5 {
		usage()
	}

	from := os.Args[1]
	to := os.Args[2]
	keys := os.Args[3]
	threads, err := strconv.Atoi(os.Args[4])
	if err != nil {
		log.Fatal("Main: threads conversion error: ", err)
	}

	if threads <= 0 {
		log.Fatal("Main: threads must be > 0")
	}

	pipe := New(from, to, keys, threads)
	pipes, ch := pipe.Init()

	allKeys := pipes[0].Keys()

	count := len(allKeys)
	bar := pb.StartNew(count)
	bar.ShowPercent = true
	bar.ShowBar = true
	bar.ShowCounters = true
	bar.ShowTimeLeft = true
	bar.ShowSpeed = true

	for _, v := range allKeys {
		op := Op{v, 0, nil}
		ch <- op
		bar.Increment()
	}

	for i := 0; i < pipe.threads; i++ {
		repch := make(chan bool, 1)
		op := Op{"", 1, repch}
		ch <- op
		<-repch
	}

	bar.FinishPrint("Done.")

}
Example #24
func output(w io.Writer, primes []uint64) {
	bar := pb.StartNew(len(primes))
	bar.SetMaxWidth(80)
	bar.ShowCounters = false

	for k, p := range primes {
		bar.Increment()

		for l := uint8(0); l < 64; l++ {
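			// a zero bit marks a prime: bit l of word k encodes the odd number (k*64+l)*2+3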
			if (p>>l)&1 == 0 {
				fmt.Fprintln(w, (k*64+int(l))*2+3)
			}
		}
	}

	bar.Finish()
}
Example #25
func mycopy(dst io.Writer, src io.Reader, total int64) {
	s := 0
	b := make([]byte, 4096)
	stop := false
	bar := pb.StartNew(int(total))

	bar.ShowSpeed = true

	go func() {
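		// refresh the bar once per second from the running byte counter s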
		for !stop {
			time.Sleep(time.Second)
			bar.Set(s)
			bar.Update()
			//log.Printf("%d/%d  %dk/s\n", s, total, int(float64(s)/1024.0/time.Now().Sub(start).Seconds()))
		}

	}()
	defer func() {
		stop = true
		bar.Set(s)
		bar.Update()
		if int64(s) == total {

			bar.FinishPrint("Finished!")
		} else {
			bar.FinishPrint("something wrong!")
		}
	}()
	for {
		n, err := src.Read(b)
		if n > 0 {
			s += n
			_, err := dst.Write(b[:n])
			if err != nil {
				//log.Println(err)
				return
			}
		}
		if err != nil {
			//log.Println(err)
			return
		}
	}

}
Example #26
func learnEngine(n *neural.Network) {

	gofiles := getGoFiles()
	rbfiles := getRbFiles()
	jsfiles := getJsFiles()

	count := 1000
	bar := pb.StartNew(count)

	for i := 0; i < count; i++ {
		bar.Increment()
		learnLangFile(n, gofiles[rand.Intn(len(gofiles))], []float64{1, 0, 0})
		learnLangFile(n, rbfiles[rand.Intn(len(rbfiles))], []float64{0, 1, 0})
		learnLangFile(n, jsfiles[rand.Intn(len(jsfiles))], []float64{0, 0, 1})
	}
	bar.Finish()

}
Example #27
func (e *Engine) InitNLP() {
	e.semaphore.Lock()
	defer e.semaphore.Unlock()
	if e.Ready {
		return
	}
	Infoln("Init Natural Language Processing Engine")
	initialized := false
	count := 80
	bar := pb.StartNew(count)
	bar.ShowPercent = true
	bar.ShowCounters = false

	inc := func() {
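		// each call advances the bar by 10 of its 80 ticks; inc is handed to nlp.NewNLPOptions
		// below, which appears to use it as a progress callback while loading data files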
		for i := 0; i < 10; i++ {
			bar.Increment()
		}
	}

	start := time.Now().UnixNano()
	nlpOptions := nlp.NewNLPOptions(path+"data/", lang, inc)
	nlpOptions.Severity = nlp.ERROR
	nlpOptions.TokenizerFile = "tokenizer.dat"
	nlpOptions.SplitterFile = "splitter.dat"
	nlpOptions.TaggerFile = "tagger.dat"
	nlpOptions.ShallowParserFile = "chunker/grammar-chunk.dat"
	nlpOptions.SenseFile = "senses.dat"
	nlpOptions.UKBFile = "" //"ukb.dat"
	nlpOptions.DisambiguatorFile = "common/knowledge.dat"

	macoOptions := nlp.NewMacoOptions(lang)
	macoOptions.SetDataFiles("", path+"data/common/punct.dat", path+"data/"+lang+"/dicc.src", "", "", path+"data/"+lang+"/locucions-extended.dat", path+"data/"+lang+"/np.dat", "", path+"data/"+lang+"/probabilitats.dat")

	nlpOptions.MorfoOptions = macoOptions

	nlpEngine := nlp.NewNLPEngine(nlpOptions)

	stop := time.Now().UnixNano()
	delta := (stop - start) / (1000 * 1000)
	initialized = true
	bar.FinishPrint(fmt.Sprintf("Data loaded in %dms", delta))
	e.NLP = nlpEngine
	e.Ready = initialized
}
Example #28
func main() {
	limpa()
	fmt.Print("\rteste\n")
	/*	fmt.Print("\rteste\n")*/
	count := 10
	bar := pb.StartNew(count)
	bar.SetRefreshRate(time.Millisecond * 10)
	w := bufio.NewWriter(os.Stdout)

	for i := 0; i < count; i++ {
		fmt.Fprint(w, "Hello, ")
		fmt.Fprint(w, "world!")

		bar.Increment()

		//fmt.Print("\rteste\n")
		time.Sleep(time.Second)
	}
	bar.FinishPrint("The End!")
	w.Flush() // Don't forget to flush!
}
Example #29
func start(tracks map[string]*Track) {
	queue := make(chan *Track)

	bar := pb.StartNew(len(tracks))

	var wg sync.WaitGroup

	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go work(&wg, bar, queue)
	}

	for _, track := range tracks {
		queue <- track
	}

	close(queue)
	wg.Wait()

	bar.FinishPrint("Done!")
}
Example #30
func (self *httpUtils) Run(handle Handler, URL string, times int) {
	fileName := utils.RandomUtils.GetFileName()
	fileObject, err := os.Create(fileName)
	if err != nil {
		fmt.Println(err.Error())
		fmt.Println("failed to create the output file")
		return
	}
	bar := pb.StartNew(times)
	for i := 0; i < times; i++ {
		bar.Increment()
		URLLink := URL
		if !strings.Contains(URL, "?") {
			URLPARAM = ParamsParse.UrlUtils.GetUrlLinkByParams("./config/myconfig")
			URLRANDOMPARAM = ParamsParse.UrlUtils.GetUrlLinkByRandomParams("./config/RandomParamsConfig")
			URLLink = URL + "?" + URLPARAM + URLRANDOMPARAM
		}
		response := handle(URLLink)
		fileObject.Write([]byte(response + systeminfo.NEWLINE))
	}
	bar.FinishPrint("The End!")
	fileObject.Close()
}