This patch adds a callback function pointer to the ThreadletWork structure and turns the AIO handler into such a callback: the generic threadlet_worker() loop now just dequeues a work item and invokes work->func(work), while the AIO-specific code moves into aio_thread(), which is installed on each request at submission time.
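For readers new to the threadlet design, below is a self-contained sketch of the pattern this patch moves to: a work item carries its own handler, a generic worker dequeues the item and calls work->func(work), and the submitter installs the handler before queueing. All sketch_* names are illustrative only and do not exist in QEMU; plain pthreads and a LIFO list stand in for QEMU's lock/cond wrappers, QTAILQ, idle-timeout handling and thread-pool accounting.

/*
 * Sketch of the callback-dispatch pattern described above (illustrative
 * names, not QEMU code).  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>

typedef struct sketch_work {
    struct sketch_work *next;
    void (*func)(struct sketch_work *work);   /* counterpart of ThreadletWork::func */
} sketch_work;

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static sketch_work *queue_head;

/* Generic worker: no request-type knowledge, it only dispatches callbacks. */
static void *sketch_worker(void *unused)
{
    (void)unused;
    for (;;) {
        pthread_mutex_lock(&lock);
        while (!queue_head) {
            pthread_cond_wait(&cond, &lock);
        }
        sketch_work *work = queue_head;
        queue_head = work->next;
        pthread_mutex_unlock(&lock);

        work->func(work);                     /* the callback does the real work */
    }
    return NULL;
}

/* Submitter: install the handler, queue the item, wake a worker. */
static void sketch_submit(sketch_work *work, void (*func)(sketch_work *))
{
    work->func = func;
    pthread_mutex_lock(&lock);
    work->next = queue_head;                  /* LIFO for brevity; the patch uses a FIFO QTAILQ */
    queue_head = work;
    pthread_mutex_unlock(&lock);
    pthread_cond_signal(&cond);
}

/* A user embeds the work item and recovers its own state in the handler,
 * mirroring container_of(work, struct qemu_paiocb, work) in aio_thread(). */
typedef struct sketch_req {
    int payload;
    sketch_work work;
} sketch_req;

static void sketch_handler(sketch_work *work)
{
    sketch_req *req = (sketch_req *)((char *)work - offsetof(sketch_req, work));
    printf("handled request %d\n", req->payload);
}

int main(void)
{
    pthread_t tid;
    static sketch_req req = { .payload = 42 };

    pthread_create(&tid, NULL, sketch_worker, NULL);
    sketch_submit(&req.work, sketch_handler);
    sleep(1);                                 /* crude: give the worker time to run */
    return 0;
}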
Signed-off-by: Arun R Bharadwaj <a...@linux.vnet.ibm.com>
---
 posix-aio-compat.c |   88 +++++++++++++++++++++++++++++-----------------------
 1 files changed, 49 insertions(+), 39 deletions(-)

diff --git a/posix-aio-compat.c b/posix-aio-compat.c
index de52eb5..0a4d82b 100644
--- a/posix-aio-compat.c
+++ b/posix-aio-compat.c
@@ -36,6 +36,7 @@ static QemuCond aiocb_completion;
 
 typedef struct ThreadletWork {
     QTAILQ_ENTRY(ThreadletWork) node;
+    void (*func)(struct ThreadletWork *work);
 } ThreadletWork;
 
 struct qemu_paiocb {
@@ -308,18 +309,14 @@ static ssize_t handle_aiocb_rw(struct qemu_paiocb *aiocb)
     return nbytes;
 }
 
-static void *aio_thread(void *unused)
+static void *threadlet_worker(void *data)
 {
-    pid_t pid;
-    ThreadletWork *work;
-
-    pid = getpid();
 
     while (1) {
-        struct qemu_paiocb *aiocb;
         ssize_t ret = 0;
         qemu_timeval tv;
         struct timespec ts;
+        ThreadletWork *work;
 
         qemu_gettimeofday(&tv);
         ts.tv_sec = tv.tv_sec + 10;
@@ -332,52 +329,63 @@ static void *aio_thread(void *unused)
             ret = cond_timedwait(&cond, &lock, &ts);
         }
 
-        if (QTAILQ_EMPTY(&request_list))
+        if (QTAILQ_EMPTY(&request_list)) {
+            idle_threads--;
+            cur_threads--;
+            mutex_unlock(&lock);
             break;
-
+        }
         work = QTAILQ_FIRST(&request_list);
         QTAILQ_REMOVE(&request_list, work, node);
-
-        aiocb = container_of(work, struct qemu_paiocb, work);
-
-        aiocb->active = 1;
         idle_threads--;
         mutex_unlock(&lock);
 
-        switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
-        case QEMU_AIO_READ:
-        case QEMU_AIO_WRITE:
-            ret = handle_aiocb_rw(aiocb);
-            break;
-        case QEMU_AIO_FLUSH:
-            ret = handle_aiocb_flush(aiocb);
-            break;
-        case QEMU_AIO_IOCTL:
-            ret = handle_aiocb_ioctl(aiocb);
-            break;
-        default:
-            fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
-            ret = -EINVAL;
-            break;
-        }
-
+        work->func(work);
         mutex_lock(&lock);
         idle_threads++;
         mutex_unlock(&lock);
 
-        qemu_mutex_lock(&aiocb_mutex);
-        aiocb->ret = ret;
-        qemu_cond_broadcast(&aiocb_completion);
-        qemu_mutex_unlock(&aiocb_mutex);
+    }
+
+    return NULL;
+}
+
+static void aio_thread(ThreadletWork *work)
+{
+    pid_t pid;
+    ssize_t ret = 0;
+    struct qemu_paiocb *aiocb;
+
+    pid = getpid();
+    aiocb = container_of(work, struct qemu_paiocb, work);
+    aiocb->active = 1;
 
-        if (kill(pid, aiocb->ev_signo)) die("kill failed");
+    switch (aiocb->aio_type & QEMU_AIO_TYPE_MASK) {
+    case QEMU_AIO_READ:
+    case QEMU_AIO_WRITE:
+        ret = handle_aiocb_rw(aiocb);
+        break;
+    case QEMU_AIO_FLUSH:
+        ret = handle_aiocb_flush(aiocb);
+        break;
+    case QEMU_AIO_IOCTL:
+        ret = handle_aiocb_ioctl(aiocb);
+        break;
+    default:
+        fprintf(stderr, "invalid aio request (0x%x)\n", aiocb->aio_type);
+        ret = -EINVAL;
+        break;
     }
 
-    idle_threads--;
-    cur_threads--;
-    mutex_unlock(&lock);
+    qemu_mutex_lock(&aiocb_mutex);
+    aiocb->ret = ret;
+    qemu_cond_broadcast(&aiocb_completion);
+    qemu_mutex_unlock(&aiocb_mutex);
 
-    return NULL;
+    if (kill(pid, aiocb->ev_signo)) {
+        die("kill failed");
+    }
+    return;
 }
 
 static void spawn_thread(void)
@@ -391,7 +399,7 @@ static void spawn_thread(void)
     if (sigfillset(&set)) die("sigfillset");
     if (sigprocmask(SIG_SETMASK, &set, &oldset)) die("sigprocmask");
 
-    thread_create(&thread_id, &attr, aio_thread, NULL);
+    thread_create(&thread_id, &attr, threadlet_worker, NULL);
 
     if (sigprocmask(SIG_SETMASK, &oldset, NULL)) die("sigprocmask restore");
 }
@@ -406,6 +414,8 @@ static void qemu_paio_submit(struct qemu_paiocb *aiocb)
     mutex_lock(&lock);
     if (idle_threads == 0 && cur_threads < max_threads)
         spawn_thread();
+
+    aiocb->work.func = aio_thread;
     QTAILQ_INSERT_TAIL(&request_list, &aiocb->work, node);
     mutex_unlock(&lock);
     cond_signal(&cond);