// createWorkerPool sizes a tunny worker pool to the number of available CPU
// cores and routes each job's byte payload into osmData.
func createWorkerPool() {
	numCPUs := runtime.NumCPU()
	// Since Go 1.5 this is the default, but it is set explicitly here.
	runtime.GOMAXPROCS(numCPUs)

	var err error
	workerPool, err = tunny.CreatePool(numCPUs, func(data interface{}) interface{} {
		input, _ := data.([]byte)
		osmData(input)
		return 1
	}).Open()
	if err != nil {
		log.Panicln(err)
	}
}
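// Callers would then feed raw payloads into the pool. A minimal sketch,
// assuming workerPool is a package-level *tunny.WorkPool; dispatchBlobs and
// blobs are hypothetical and not part of the original code.
func dispatchBlobs(blobs [][]byte) {
	for _, blob := range blobs {
		// SendWork blocks until a worker is free, giving natural backpressure.
		if _, err := workerPool.SendWork(blob); err != nil {
			log.Println("worker pool rejected job:", err)
		}
	}
}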
func Initialize(srcs []Downloadable, threads int) (d *DownloadEngine, err error) {
	if srcs == nil || threads <= 0 {
		return nil, errors.New("invalid argument while initializing the download engine")
	}
	d = new(DownloadEngine)
	d.registerQMLTypes()
	d.srcs = srcs

	d.engine = qml.NewEngine()
	component, err := d.engine.LoadFile("qrc:///qml/downloadgui.qml")
	if err != nil {
		log.Panicln(err)
	}
	d.engine.Context().SetVar("engine", d)
	d.mainwindow = component.CreateWindow(nil)
	d.mainwindow.Show()
	model := d.mainwindow.Root().ObjectByName("filetable").ObjectByName("dllist")

	d.threadPool, err = tunny.CreatePool(threads, func(object interface{}) interface{} {
		dlo := object.(Downloadable)
		d.completedl = append(d.completedl, 0)
		d.totaldl = append(d.totaldl, 0)
		model.Call("appendStruct", &DisplayableItem{
			Fname:      filepath.Base(dlo.ActualPath()),
			Dlspeed:    "0B/s",
			Dlprogress: 0.0,
		})
		item := model.Call("back").(qml.Object)
		index := model.Int("count") - 1
		dlo.Progress(func(p curl.ProgressStatus) {
			if p.Size != 0 && d.completedl[index] != p.Size {
				d.completedl[index] = p.Size
			}
			if p.ContentLength != 0 && d.totaldl[index] != p.ContentLength {
				d.totaldl[index] = p.ContentLength
			}
			// When the download completes, Percent resets to 0, so treat that
			// as 100% and mark the item as fully downloaded.
			if p.Percent != 0 {
				item.Set("dlprogress", p.Percent)
			} else {
				item.Set("dlprogress", 1)
				d.completedl[index] = d.totaldl[index]
			}
			item.Set("dlspeed", curl.PrettySpeedString(p.Speed))
			d.updateTotalProgress()
		}, 100*time.Millisecond)
		if _, err := dlo.Do(); err != nil {
			/*dialogboxes.ShowErrorDialog(err.Error())*/
			log.Println(err.Error())
		}
		return nil
	}).Open()
	if err != nil {
		log.Panicln(err)
	}
	for _, v := range srcs {
		d.threadPool.SendWorkAsync(v, func(interface{}, error) {})
	}
	return
}
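// The worker closure above touches only three behaviors of each source. A
// sketch of the Downloadable contract it appears to assume; the method names
// are taken from the calls above, but the exact signatures are guesses.
type Downloadable interface {
	// ActualPath returns the destination file path; its base name is shown in the UI.
	ActualPath() string
	// Progress registers a status callback invoked at the given interval.
	Progress(fn func(curl.ProgressStatus), interval time.Duration)
	// Do performs the download, blocking until it finishes or fails.
	Do() (interface{}, error)
}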
func (h *HostUptimeChecker) Init(workers, triggerLimit, timeout int,
	hostList map[string]HostData,
	failureCallback func(HostHealthReport),
	upCallback func(HostHealthReport),
	pingCallback func(HostHealthReport)) {
	h.sampleCache = cache.New(30*time.Second, 5*time.Second)
	h.stopPollingChan = make(chan bool)
	h.errorChan = make(chan HostHealthReport)
	h.okChan = make(chan HostHealthReport)
	h.HostList = hostList
	h.unHealthyList = make(map[string]bool)
	h.failureCallback = failureCallback
	h.upCallback = upCallback
	h.pingCallback = pingCallback

	// Fall back to the package defaults when a zero value is passed.
	h.workerPoolSize = workers
	if workers == 0 {
		h.workerPoolSize = defaultWorkerPoolSize
	}
	h.sampleTriggerLimit = triggerLimit
	if triggerLimit == 0 {
		h.sampleTriggerLimit = defaultSampletTriggerLimit
	}
	h.checkTimout = timeout
	if timeout == 0 {
		h.checkTimout = defaultTimeout
	}

	log.Debug("[HOST CHECKER] Config:TriggerLimit: ", h.sampleTriggerLimit)
	log.Debug("[HOST CHECKER] Config:Timeout: ", h.checkTimout)
	log.Debug("[HOST CHECKER] Config:WorkerPool: ", h.workerPoolSize)

	var pErr error
	h.pool, pErr = tunny.CreatePool(h.workerPoolSize, func(hostData interface{}) interface{} {
		input, _ := hostData.(HostData)
		h.CheckHost(input)
		return nil
	}).Open()
	if pErr != nil {
		log.Error("[HOST CHECKER POOL] Error: ", pErr)
	}
	log.Debug("[HOST CHECKER] Init complete")
}
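// Init only builds the pool; a polling loop would then fan the host list out
// to it. A minimal sketch, assuming the legacy tunny API used above; the
// method name and the ticker-driven shape are hypothetical, not from the
// original code.
func (h *HostUptimeChecker) hostCheckLoop(interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-h.stopPollingChan:
			return
		case <-ticker.C:
			for _, host := range h.HostList {
				// Async submission keeps the loop from blocking on busy workers.
				h.pool.SendWorkAsync(host, func(interface{}, error) {})
			}
		}
	}
}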
func main() {
	fmt.Println("test")
	pool, err := tunny.CreatePool(10, func(input interface{}) interface{} {
		v := input.(string)
		time.Sleep(5 * time.Second)
		log.Println("execute @v:", v)
		return nil
	}).Open()
	utee.Chk(err)

	messages := make(chan string, 1000000)
	for i := 0; i < 1000000; i++ {
		messages <- fmt.Sprint("test", i)
	}
	// Close the channel so the range loop below terminates once it is
	// drained; without this the loop deadlocks after the last message.
	close(messages)

	var wg sync.WaitGroup
	for line := range messages {
		wg.Add(1)
		go func(l string) {
			defer wg.Done()
			// 5100 ms timeout: just above the 5 s each job sleeps.
			if _, err := pool.SendWorkTimed(5100, l); err != nil {
				log.Println("send work failed:", err)
			}
		}(line)
	}
	wg.Wait()
}
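// Spawning one goroutine per message bypasses the pool's natural
// backpressure. A simpler variant, assuming job results are not needed:
// send synchronously and let SendWork block until a worker frees up. The
// function name is hypothetical; the tunny calls match the API used above.
func drainSynchronously(pool *tunny.WorkPool, messages chan string) {
	for line := range messages {
		if _, err := pool.SendWork(line); err != nil {
			log.Println("send work failed:", err)
		}
	}
	pool.Close() // release the workers once every job has completed
}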