Example #1
File: main.go Project: cnaize/quiz
func main() {
	fmt.Println("Running..")

	var inPath string
	flag.StringVar(&inPath, "in", "word.list", "path to file with words")
	flag.Parse()

	// load words
	wordList, err := loadWords(inPath)
	if err != nil {
		panic(fmt.Sprintf("can't load input file: %+v\n", err))
	}

	numCPU := runtime.NumCPU()

	// create pool
	mypool := pool.New(numCPU)
	mypool.Run()

	allWords := wordList.AllWords()

	var i int
	var found bool
	var res string
	for {
		stats := mypool.Status()
		if !found {
			// add payload depending on "numCPU" and running workers
			for j := 0; j < numCPU-stats.Running; j++ {
				if i < len(allWords) {
					mypool.Add(words.HandleWord, allWords[i], wordList)
					i++
				}
			}
		}

		job := mypool.WaitForJob()
		if job == nil {
			break
		}

		jres, ok := job.Result.(string)
		if !ok {
			panic("job: invalid result type")
		}

		if len(jres) > len(res) {
			res = jres
			found = true
		}

		if found && stats.Completed == stats.Submitted {
			break
		}
	}

	fmt.Printf("Result: %s, len - %d\n", res, len(res))
}
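
words.HandleWord itself does not appear in this excerpt. Judging from how pool.Add is used across these examples (a job function followed by its arguments, with the return value surfaced as job.Result), the handler takes variadic interface{} arguments and returns an interface{}. A minimal sketch of a compatible handler, with hypothetical word-processing logic:

// Hypothetical sketch: a job function with the signature pool.Add expects.
// args[0] is the word passed from main, args[1] the word list.
func HandleWord(args ...interface{}) interface{} {
	word := args[0].(string)
	// ... process the word against the word list here ...
	return word // read back by the caller as job.Result
}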
Example #2
// DiscoverMiner is a DiscoveryItemHandlerFunc for key `cgminer.discovery` which returns JSON
// encoded discovery data for all running cgminer
func DiscoverMiner(request []string) (lld.DiscoveryData, error) {
	// init discovery data
	d := make(lld.DiscoveryData, 0)

	discoverypool := pool.New(4)
	discoverypool.Run()

	go sendDiscoveryMsg(mCastReport)
	l, err := net.ListenUDP("udp", listenAddr)
	if err != nil {
		return nil, fmt.Errorf("unable to listen on %s: %s", listenAddr, err)
	}
	l.SetReadBuffer(maxDatagramSize)
	l.SetReadDeadline(time.Now().Add(2 * time.Second))
	for {
		b := make([]byte, maxDatagramSize)
		n, addr, err := l.ReadFromUDP(b)
		if err != nil {
			break
		}
		if isMyAddress(addr.IP) {
			continue
		}
		msg := strings.Split(string(b[:n]), "-")
		if len(msg) < 3 {
			continue
		}
		port, err := strconv.ParseInt(msg[2], 10, 64)
		if err == nil {
			discoverypool.Add(DiscoverDevs, port)
		}
	}

	//  status := discoverypool.Status()
	//  log.Println(status.Submitted, "submitted jobs,", status.Running, "running,", status.Completed, "completed.")
	discoverypool.Wait()
	completedJobs := discoverypool.Results()
	for _, job := range completedJobs {
		if job.Result == nil {
			// TODO: handle this
			log.Println("got error:", job.Err)
		} else {
			item := job.Result.(lld.DiscoveryData)
			if item != nil {
				d = append(d, item...)
			}
		}
	}

	return d, nil
}
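
DiscoverDevs is not shown in this excerpt. From the result handling above it evidently returns an lld.DiscoveryData value on success and nil on failure, with the failure reported through job.Err. A hypothetical sketch under those assumptions:

// Hypothetical sketch of the DiscoverDevs job: args[0] is the port
// parsed out of the discovery datagram.
func DiscoverDevs(args ...interface{}) interface{} {
	port := args[0].(int64)
	d := make(lld.DiscoveryData, 0)
	log.Printf("querying cgminer on port %d", port)
	// ... query the miner on that port and append its entries to d ...
	return d
}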
Example #3
func main() {
	cpus := runtime.NumCPU()
	runtime.GOMAXPROCS(cpus)
	numJobs := float64(1000)

	// classical usage: add all the jobs, then wait until all are done
	log.Println("*** classical usage ***")
	mypool := pool.New(cpus)
	mypool.Run()
	for i := float64(0); i < numJobs; i++ {
		mypool.Add(work, i)
	}
	status := mypool.Status()
	log.Println("after adding all the jobs:")
	log.Println(status.Submitted, "submitted jobs,", status.Running, "running,", status.Completed, "completed.")
	mypool.Wait()
	sum := float64(0)
	completedJobs := mypool.Results()
	for _, job := range completedJobs {
		if job.Result == nil {
			log.Println("got error:", job.Err)
		} else {
			sum += job.Result.(float64)
		}
	}
	log.Println(sum)
	mypool.Stop()

	// alternative scenario: use one result at a time as it becomes available
	log.Println("*** using one result at a time as it becomes available ***")
	mypool = pool.New(cpus)
	mypool.Run()
	for i := float64(0); i < numJobs; i++ {
		mypool.Add(work, i)
	}
	sum = float64(0)
	for {
		job := mypool.WaitForJob()
		if job == nil {
			break
		}
		if job.Result == nil {
			log.Println("got error:", job.Err)
		} else {
			sum += job.Result.(float64)
		}
	}
	status = mypool.Status()
	log.Println("after getting all the results:")
	log.Println(status.Submitted, "submitted jobs,", status.Running, "running,", status.Completed, "completed.")
	log.Println(sum)
	mypool.Stop()

	// stopping and restarting the pool
	log.Println("*** stopping and restarting the pool ***")
	mypool = pool.New(cpus)
	mypool.Run()
	for i := float64(0); i < numJobs; i++ {
		mypool.Add(work, i)
	}
	sum = float64(0)
	status = mypool.Status()
	mypool.Stop()
	log.Println("after stopping:")
	log.Println(status.Submitted, "submitted jobs,", status.Running, "running,", status.Completed, "completed.")
	mypool.Run()
	mypool.Wait()
	completedJobs = mypool.Results()
	for _, job := range completedJobs {
		if job.Result == nil {
			log.Println("got error:", job.Err)
		} else {
			sum += job.Result.(float64)
		}
	}
	status = mypool.Status()
	log.Println("after restarting and getting all the results:")
	log.Println(status.Submitted, "submitted jobs,", status.Running, "running,", status.Completed, "completed.")
	log.Println(sum)
	mypool.Stop()
}
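
The work function itself is omitted here. Each job receives the loop counter as a float64 and the caller sums job.Result.(float64), so any function of that shape will do; a minimal stand-in:

// Hypothetical work function: args[0] is the float64 index passed
// to mypool.Add; the float64 return value is accumulated into sum.
func work(args ...interface{}) interface{} {
	x := args[0].(float64)
	return x * x // any float64-producing computation works here
}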
Example #4
		return crawlResult{}
	}
	body, urls, err := fetcher.Fetch(url)
	return crawlResult{body, urls, err}
}

func urlAlreadyProcessed(urls []string, u string) bool {
	for _, url := range urls {
		if url == u {
			return true
		}
	}
	return false
}

var mypool = pool.New(6) // number of workers

func main() {
	cpus := runtime.NumCPU()
	runtime.GOMAXPROCS(cpus)
	mypool.Run()
	firstURL := "http://golang.org/"
	seenURLs := []string{firstURL}
	mypool.Add(work, firstURL, 4, fetcher)
	for {
		job := mypool.WaitForJob()
		if job == nil {
			break
		}
		if job.Result == nil {
			fmt.Println("got error:", job.Err)
Example #5
	"syscall"
	"time"

	"github.com/bmizerany/pat"
	"github.com/mmartin101/feed2json/db"
	"github.com/mmartin101/feed2json/models"
	"github.com/stefantalpalaru/pool"
)

type feedFetchResult struct {
	feed *models.Feed
	err  *_Error
}

// WorkerPool
var WorkerPool = pool.New(2)
var backoff time.Duration

func main() {
	dbMan, err := db.NewPostgresDBManager()
	cpus := runtime.NumCPU()
	runtime.GOMAXPROCS(cpus)
	WorkerPool.Run()
	go processResults(dbMan)
	go func() {
		sigCh := make(chan os.Signal, 1)
		signal.Notify(sigCh, syscall.SIGTERM, syscall.SIGINT)
		// Wait for a signal
		sig := <-sigCh
		log.Println("Signal received, shutting down:", sig)
		WorkerPool.Add(func(args ...interface{}) interface{} {
Example #6
func pcreate(args []string) (err error) {
	n := lib.Node{}
	var filename string
	if ne(conf.Flags["full"]) {
		filename = (*conf.Flags["full"])
	} else {
		helpf("pcreate requires file path: -full=<u>")
	}

	var filesize int64
	fh, err := os.Open(filename)
	if err != nil {
		handleString(fmt.Sprintf("Error opening file: %s\n", err.Error()))
	}
	defer fh.Close()
	if fi, statErr := fh.Stat(); statErr == nil {
		filesize = fi.Size()
	}

	chunks := int(filesize / conf.CHUNK_SIZE)
	if filesize%conf.CHUNK_SIZE != 0 {
		chunks++
	}

	if chunks == 1 {
		opts := lib.Opts{}
		opts["upload_type"] = "full"
		opts["full"] = filename
		if err := n.Create(opts); err != nil {
			handleString(fmt.Sprintf("Error creating node: %s\n", err.Error()))
		} else {
			n.PP()
		}
	} else {
		threads, _ := strconv.Atoi(*conf.Flags["threads"])
		if threads == 0 {
			threads = 1
		}

		//create node
		opts := lib.Opts{}
		opts["upload_type"] = "parts"
		opts["parts"] = strconv.Itoa(chunks)
		if err := n.Create(opts); err != nil {
			handleString(fmt.Sprintf("Error creating node: %s\n", err.Error()))
		}

		workers := pool.New(threads)
		workers.Run()
		for i := 0; i < chunks; i++ {
			size := int64(conf.CHUNK_SIZE)
			if size*(int64(i)+1) > filesize {
				size = filesize - size*(int64(i))
			}
			workers.Add(uploader, n, i+1, fh, size)
		}
		workers.Wait()
		maxRetries := 10
		for i := 1; i <= maxRetries; i++ {
			errCount := 0
			completedJobs := workers.Results()
			for _, job := range completedJobs {
				if job.Result != nil {
					err := job.Result.(error)
					println("Chunk", job.Args[1].(int), "error:", err.Error())
					workers.Add(job.F, job.Args...)
					errCount++
				}
			}
			if errCount == 0 {
				println("All chunks successfully upload.")
				break
			} else {
				println("Retry", i, "of", maxRetries)
				workers.Wait()
			}
		}
		workers.Stop()

		n.Get()
		n.PP()
	}
	return
}
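
The uploader job is not shown. The retry loop above treats a non-nil job.Result as a failed chunk and re-submits it via job.F and job.Args, so uploader presumably returns nil on success and the error value on failure. A hypothetical sketch (uploadChunk is an invented placeholder):

// Hypothetical uploader job. The arguments mirror the workers.Add
// call above: the node, the 1-based part index, the open file
// handle, and the chunk size.
func uploader(args ...interface{}) interface{} {
	n := args[0].(lib.Node)
	part := args[1].(int)
	fh := args[2].(*os.File)
	size := args[3].(int64)
	// uploadChunk is a hypothetical helper that reads this chunk
	// from fh and uploads it as part `part` of node n.
	if err := uploadChunk(n, part, fh, size); err != nil {
		return err // a non-nil result flags this chunk for retry
	}
	return nil
}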