func cleanFiles(paths []string) {
	var wg sync.WaitGroup
	wg.Add(len(paths))
	p := pool.NewPool(config.Parallelism, func(id uint, payload interface{}) interface{} {
		defer wg.Done()
		path := payload.(string)
		debug("Worker", id, "processing", path)
		file, err := ParseFile(path)
		if err != nil {
			debug("Worker", id, "error parsing", path, "-", err)
			return err
		}
		err = file.Rewrite()
		if err != nil {
			debug("Worker", id, "error rewriting", path, "-", err)
			return err
		}
		return nil
	})
	for _, path := range paths {
		p.Submit(pool.NewJob(path))
	}
	wg.Wait()
}
func TestPoolSerialLimitedWithBurst(t *testing.T) {
	n := uint64(1000)
	op_per_sec := uint(10000)
	worker := func(id uint, payload interface{}) interface{} { return true }
	p := pool.NewRateLimitedPool(1, op_per_sec, 1000, worker)
	// done is updated with sync/atomic because it is written by the
	// submitting goroutine and read by the test goroutine.
	var done uint64
	go func() {
		time.Sleep(100 * time.Millisecond)
		for j := uint64(0); j < n; j++ {
			req := pool.NewJob(struct{}{})
			p.Submit(req)
			req.Result()
			atomic.AddUint64(&done, 1)
		}
	}()
	// Processing should happen immediately because we have enough burst
	// capacity to satisfy the jobs.
	<-time.After(110 * time.Millisecond)
	if atomic.LoadUint64(&done) != n {
		t.Fatalf("Pool didn't use burst capacity")
	}
}
func TestPoolSerialLimitedWithoutBurst(t *testing.T) {
	n := uint64(1000)
	op_per_sec := uint(10000)
	worker := func(id uint, payload interface{}) interface{} { return true }
	p := pool.NewRateLimitedPool(1, op_per_sec, 0, worker)
	var done uint64 // updated atomically; read concurrently below
	go func() {
		for j := uint64(0); j < n; j++ {
			req := pool.NewJob(struct{}{})
			p.Submit(req)
			req.Result()
			atomic.AddUint64(&done, 1)
		}
	}()
	// n ops at op_per_sec ops/sec should take expected milliseconds.
	// Check shortly before that deadline: with no burst capacity the
	// pool must not have finished all the work yet.
	expected := float32(n) / float32(op_per_sec) * 1000
	<-time.After(time.Duration(expected-expected/10) * time.Millisecond)
	if atomic.LoadUint64(&done) == n {
		t.Fatalf("Pool completed work faster than specified rate")
	}
}
func testPoolParallelLimitedWithoutBurst(t *testing.T, num_workers, num_req uint) {
	prev := runtime.GOMAXPROCS(4)
	defer runtime.GOMAXPROCS(prev) // restore the previous setting
	op_per_sec := uint(10000)
	worker := func(id uint, payload interface{}) interface{} { return true }
	p := pool.NewRateLimitedPool(num_workers, op_per_sec, 0, worker)
	var done uint64 // updated atomically; read concurrently below
	go func() {
		for j := uint(0); j < num_req; j++ {
			req := pool.NewJob(struct{}{})
			p.Submit(req)
			req.Result()
			atomic.AddUint64(&done, 1)
		}
	}()
	// Shortly before the earliest time the rate limit would allow all
	// jobs to finish, the pool should still have work outstanding.
	expected := float32(num_req) / float32(op_per_sec) * 1000
	<-time.After(time.Duration(expected-expected/10) * time.Millisecond)
	if atomic.LoadUint64(&done) == uint64(num_req) {
		t.Fatalf("Pool completed work faster than specified rate")
	}
}
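This helper isn't run directly by go test; exported Test functions drive it with concrete worker and request counts. Those drivers aren't shown here, but a plausible one looks like this (4 workers and 1000 requests are illustrative values, not the originals):

func TestPoolParallelLimitedWithoutBurst(t *testing.T) {
	// Hypothetical driver: the real worker/request counts are not shown.
	testPoolParallelLimitedWithoutBurst(t, 4, 1000)
}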
func TestPoolSerial(t *testing.T) {
	i := 0
	worker := func(id uint, payload interface{}) interface{} {
		// Safe without locking: a single worker serializes all jobs.
		i++
		return i
	}
	p := pool.NewPool(1, worker)
	for expected := 1; expected <= 100; expected++ {
		req := pool.NewJob(struct{}{})
		p.Submit(req)
		if res := req.Result(); res != expected {
			t.Fatalf("got %#v; want %d", res, expected)
		}
	}
}
func clean_files(paths []string) {
	var wg sync.WaitGroup
	wg.Add(len(paths))
	p := pool.NewPool(8, func(id uint, payload interface{}) interface{} {
		process(payload.(string))
		wg.Done()
		return nil
	})
	for _, path := range paths {
		p.Submit(pool.NewJob(path))
	}
	wg.Wait()
}
func (cb *citibike) get_all_trips(username string, cache TripCache) (Trips, error) {
	doc, err := cb.get_trips_document(cb.trips_path)
	if err != nil {
		return nil, err
	}
	next_page, last_page := parse_navigation(doc, cb.trips_path)

	// Fetch the remaining pages concurrently through the pool.
	var wg sync.WaitGroup
	wg.Add(last_page - next_page + 1)
	var jobs []pool.Job
	for p := next_page; p <= last_page; p++ {
		job := pool.NewJob(fetchTrips{&wg, p, cb})
		tripPool.Submit(job)
		jobs = append(jobs, job)
	}
	wg.Wait()

	// The first page was fetched above; merge in the results of every
	// job submitted to the pool.
	trips := cb.parse_trips(doc)
	for _, job := range jobs {
		switch result := job.Result().(type) {
		case Trips:
			trips = append(trips, result...)
		case error:
			return trips, result
		default:
			return trips, errors.New("unexpected return type from the pool")
		}
	}

	sort.Sort(trips)
	for _, trip := range trips {
		cache.PutTrip(username, trip)
	}
	return trips, nil
}
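The worker behind tripPool is not shown in this snippet. A minimal sketch of what it might look like, assuming fetchTrips bundles the wait group, page number, and client in that order (field names are assumptions), and that a hypothetical trips_page_path helper builds the per-page URL:

// Sketch only, not the actual worker: it unpacks a fetchTrips payload,
// fetches one page of trips, and returns either Trips or an error for
// get_all_trips to collect. trips_page_path is a hypothetical helper.
var tripPool = pool.NewPool(4, func(id uint, payload interface{}) interface{} {
	ft := payload.(fetchTrips)
	defer ft.wg.Done()
	doc, err := ft.cb.get_trips_document(trips_page_path(ft.page))
	if err != nil {
		return err
	}
	return ft.cb.parse_trips(doc)
})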
func testPoolParallel(t *testing.T, num_workers uint, num_req int) {
	prev := runtime.GOMAXPROCS(4)
	defer runtime.GOMAXPROCS(prev) // restore the previous setting
	worker := func(id uint, payload interface{}) interface{} { return id }
	p := pool.NewPool(num_workers, worker)
	ids := make(chan uint, num_req)
	go func() {
		for j := 0; j < num_req; j++ {
			req := pool.NewJob(struct{}{})
			p.Submit(req)
			ids <- req.Result().(uint)
		}
		close(ids)
	}()
	// Count how many jobs each worker handled.
	stats := make(map[uint]uint)
	for id := range ids {
		stats[id]++
	}
	// Every worker should handle roughly its fair share of jobs, within
	// 1% of the total in either direction.
	fair := uint(num_req) / num_workers
	deviation := uint(num_req) / 100
	for id, count := range stats {
		if count < fair-deviation || count > fair+deviation {
			t.Fatalf("worker %d processed %d jobs, expected %d ± %d", id, count, fair, deviation)
		}
	}
}