// wait_for_completions reaps finished kernel AIO requests. It blocks on the
// eventfd until at least one completion is pending, drains exactly that many
// events with io_getevents, and hands each result to its waiting request.
func (sch *aioScheduler) wait_for_completions() {
	for {
		// Reading the eventfd returns an 8-byte counter: the number of
		// completions signalled since the last read.
		b := make([]byte, 8)
		n, err := sch.eventfd.Read(b)
		if err != nil {
			panic(err)
		}
		if n != 8 {
			panic("read fewer than 8 bytes from eventfd")
		}
		evc := int(binary.LittleEndian.Uint64(b))

		// The counter says evc events are ready, so ask io_getevents for
		// exactly that many, retrying on EINTR until all are collected.
		events := make([]C.io_event_t, evc)
		for count := 0; count < evc; {
			toRead := C.long(evc - count)
			r := C.io_getevents(sch.ctx, toRead, toRead, &events[count], nil)
			if r >= 0 {
				count += int(r)
			} else if err := syscall.Errno(-r); err != syscall.EINTR {
				panic("io_getevents failed: " + err.Error())
			}
		}

		// Match each completed iocb back to its pending request and deliver
		// the result (res is the byte count, or a negated errno on failure).
		sch.mu.Lock()
		for _, event := range events {
			iocb := (*C.iocb_t)(event.obj)
			if ev, found := sch.reqs[iocb]; found {
				ev.set(int64(event.res))
				delete(sch.reqs, iocb)
			} else {
				println("iocb not found in pending requests")
			}
		}
		sch.mu.Unlock()
	}
}
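// None of the types behind wait_for_completions appear in the excerpt.
// What follows is a minimal sketch of one plausible shape for them; the
// event result holder and every name here are assumptions, not the
// original's actual definitions. (encoding/binary and syscall are the
// imports the function above needs.)

/*
#cgo LDFLAGS: -laio
#include <libaio.h>

// Aliases matching the C.io_event_t / C.iocb_t names used above.
typedef struct io_event io_event_t;
typedef struct iocb     iocb_t;
*/
import "C"

import (
	"encoding/binary"
	"os"
	"sync"
	"syscall"
)

// event is a hypothetical one-shot result holder: set hands the
// completion value (the io_event res field) to the submitter.
type event struct{ res chan int64 }

func (e *event) set(v int64) { e.res <- v }

// aioScheduler as wait_for_completions assumes it: a kernel AIO context
// whose iocbs were registered against an eventfd (io_set_eventfd), so
// reading the eventfd yields a count of completions ready to reap.
type aioScheduler struct {
	ctx     C.io_context_t
	eventfd *os.File // eventfd(2) descriptor wrapped in an *os.File
	mu      sync.Mutex
	reqs    map[*C.iocb_t]*event
}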
// run is the reaper loop for the second variant: a single locked OS thread
// polls io_getevents and fans each completion out to its result channel.
func run() {
	runtime.LockOSThread()
	events := make([]C.struct_io_event, max_event_size)
	timeout := C.struct_timespec{0, 0} // zero timeout: non-blocking poll
	tick := time.Tick(1 * time.Microsecond)
	for {
		// Wake on a submission signal, or on the next tick as a fallback.
		select {
		case <-have_aio_event:
		case <-tick:
		}
		aio_lock.Lock()
		if aiocount == 0 {
			aio_lock.Unlock()
			continue
		}
		// Reap between 1 and max_event_size completions without blocking.
		n := C.io_getevents(ctx, C.long(1), C.long(max_event_size), &events[0], &timeout)
		if n > 0 {
			aiocount -= int(n)
		}
		aio_lock.Unlock()
		if n <= 0 {
			continue
		}

		// Deliver each completion on its own goroutine, keyed by the uint
		// request ID stashed behind the iocb's data pointer. The map is
		// only read here; writes are assumed to happen at submit time.
		wg := &sync.WaitGroup{}
		wg.Add(int(n))
		for i := 0; i < int(n); i++ {
			go func(idx int) {
				defer wg.Done()
				cb := events[idx].obj
				iic := C.get_iic_from_iocb(cb)
				key := *(*uint)(unsafe.Pointer(cb.data))
				retch := aio_result_map[key]
				if events[idx].res2 != 0 {
					retch <- aio_result{nil, 0, errors.New("aio error")}
					return
				}
				switch cb.aio_lio_opcode {
				case C.IO_CMD_PREAD:
					// Copy the C read buffer into Go-managed memory.
					buf := C.GoBytes(unsafe.Pointer(iic.buf), C.int(events[idx].res))
					retch <- aio_result{buf, int(events[idx].res), nil}
				case C.IO_CMD_PWRITE:
					retch <- aio_result{nil, int(events[idx].res), nil}
				default:
					retch <- aio_result{nil, 0, errors.New("unknown opcode")}
				}
			}(i)
		}
		// Wait before the next io_getevents so the events buffer is not
		// overwritten while these goroutines are still reading it.
		wg.Wait()
	}
}
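// run leans on package-level state and a C helper that the excerpt does
// not define. Below is a rough sketch of what they might look like; the
// iic struct, get_iic_from_iocb, and the constants are assumptions, not
// the original's actual definitions.

/*
#cgo LDFLAGS: -laio
#include <libaio.h>

// Hypothetical per-request context. The iocb sits first, so the
// struct iocb* handed back by the kernel can be cast to its iic.
typedef struct iic {
	struct iocb cb;
	void       *buf; // C-allocated I/O buffer for this request
} iic;

static iic *get_iic_from_iocb(struct iocb *cb) {
	return (iic *)cb; // valid: cb is the first field of iic
}
*/
import "C"

import "sync"

const max_event_size = 256 // assumed reap batch size

// aio_result carries one completed request back to its submitter.
type aio_result struct {
	buf []byte // data read (PREAD only)
	n   int    // bytes transferred
	err error
}

var (
	ctx            C.io_context_t // created once via C.io_setup
	aio_lock       sync.Mutex     // guards aiocount
	aiocount       int            // submitted but not yet reaped
	have_aio_event = make(chan struct{}, 1)     // nudged after each io_submit
	aio_result_map = map[uint]chan aio_result{} // request ID -> result channel
)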