// cleanFiles parses and rewrites every file in paths, fanning the work out
// across config.Parallelism pool workers and blocking until all are done.
func cleanFiles(paths []string) {
	var wg sync.WaitGroup
	wg.Add(len(paths))

	p := pool.NewPool(config.Parallelism, func(id uint, payload interface{}) interface{} {
		defer wg.Done()

		path := payload.(string)
		debug("Worker", id, "processing", path)

		file, err := ParseFile(path)
		if err != nil {
			debug("Worker", id, "error parsing", path, "-", err)
			return err
		}
		if err := file.Rewrite(); err != nil {
			debug("Worker", id, "error rewriting", path, "-", err)
			return err
		}
		return nil
	})

	for _, path := range paths {
		p.Submit(pool.NewJob(path))
	}
	wg.Wait()
}
// TestPoolSerial checks that a single-worker pool processes jobs strictly in
// submission order: the shared counter must come back as 1, 2, ..., 100.
func TestPoolSerial(t *testing.T) {
	i := 0
	worker := func(id uint, payload interface{}) interface{} {
		i++
		return i
	}
	p := pool.NewPool(1, worker)

	for expected := 1; expected <= 100; expected++ {
		req := pool.NewJob(struct{}{})
		p.Submit(req)
		if res := req.Result(); res != expected {
			t.Fatalf("got %#v; want %d", res, expected)
		}
	}
}
// cleanFilesSimple is a stripped-down variant of cleanFiles: a fixed pool of
// eight workers runs process on each path, and the WaitGroup blocks until
// every submitted job has finished. (Renamed from clean_files; Go style uses
// camelCase, and Done is deferred so a panic in process cannot hang Wait.)
func cleanFilesSimple(paths []string) {
	var wg sync.WaitGroup
	wg.Add(len(paths))

	p := pool.NewPool(8, func(id uint, payload interface{}) interface{} {
		defer wg.Done()
		process(payload.(string))
		return nil
	})

	for _, path := range paths {
		p.Submit(pool.NewJob(path))
	}
	wg.Wait()
}
// testPoolParallel submits numReq jobs to a pool of numWorkers workers and
// checks that the work is spread roughly evenly: each worker should handle
// its fair share of jobs, give or take 1% of the total.
func testPoolParallel(t *testing.T, numWorkers uint, numReq int) {
	// Save the previous GOMAXPROCS value so it can be restored on exit;
	// GOMAXPROCS(n) returns the old setting.
	prev := runtime.GOMAXPROCS(4)
	defer runtime.GOMAXPROCS(prev)

	worker := func(id uint, payload interface{}) interface{} {
		return id
	}
	p := pool.NewPool(numWorkers, worker)

	ids := make(chan uint, numReq)
	go func() {
		for j := 0; j < numReq; j++ {
			req := pool.NewJob(struct{}{})
			p.Submit(req)
			ids <- req.Result().(uint)
		}
		close(ids)
	}()

	// Count how many jobs each worker handled.
	stats := make(map[uint]uint)
	for id := range ids {
		stats[id]++
	}

	fair := uint(numReq) / numWorkers
	deviation := uint(numReq) / 100
	for id, count := range stats {
		// A count outside [fair-deviation, fair+deviation] fails the test
		// (the original used &&, which no count could ever satisfy).
		if count < fair-deviation || count > fair+deviation {
			t.Fatalf("worker %d processed %d jobs, expected %d ± %d", id, count, fair, deviation)
		}
	}
}
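// What follows is a minimal sketch of the pool package the snippets above
// assume, inferred purely from the calls they make (pool.NewPool, pool.NewJob,
// Pool.Submit, Job.Result). The real implementation may well differ; this is
// only enough to make the examples compile and behave as described.
package pool

// WorkFunc is the signature the examples pass to NewPool: each worker gets
// its own id plus the job's payload, and returns the job's result.
type WorkFunc func(id uint, payload interface{}) interface{}

// Job carries a payload in and, once a worker has processed it, a result out.
type Job struct {
	payload interface{}
	result  chan interface{}
}

// NewJob wraps a payload for submission. The result channel is buffered so
// the worker never blocks delivering the result.
func NewJob(payload interface{}) *Job {
	return &Job{payload: payload, result: make(chan interface{}, 1)}
}

// Result blocks until a worker has processed the job, then returns its output.
func (j *Job) Result() interface{} {
	return <-j.result
}

// Pool fans submitted jobs out to a fixed set of worker goroutines.
type Pool struct {
	jobs chan *Job
}

// NewPool starts n workers, each pulling jobs off a shared queue and running
// fn on them.
func NewPool(n uint, fn WorkFunc) *Pool {
	p := &Pool{jobs: make(chan *Job)}
	for id := uint(0); id < n; id++ {
		go func(id uint) {
			for j := range p.jobs {
				j.result <- fn(id, j.payload)
			}
		}(id)
	}
	return p
}

// Submit hands a job to the next free worker, blocking until one accepts it.
func (p *Pool) Submit(j *Job) {
	p.jobs <- j
}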