From 3f989b7a0f1cb840c4740ebd81f1ddf3567a0e7a Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Tue, 14 Feb 2012 11:14:19 +0100
Subject: [PATCH 54/99] async: Remove AsyncContext

RH-Author: Kevin Wolf
Message-id: <1329218101-24213-55-git-send-email-kwolf@redhat.com>
Patchwork-id: 37259
O-Subject: [RHEL-6.3 qemu-kvm PATCH v2 54/96] async: Remove AsyncContext
Bugzilla: 783950
RH-Acked-by: Paolo Bonzini
RH-Acked-by: Marcelo Tosatti
RH-Acked-by: Laszlo Ersek

Bugzilla: 783950

The purpose of AsyncContexts was to protect qcow and qcow2 against
reentrancy during an emulated bdrv_read/write (which includes a
qemu_aio_wait() call and can run AIO callbacks of different requests if
it weren't for AsyncContexts).

Now both qcow and qcow2 are protected by CoMutexes and AsyncContexts can
be removed.

Signed-off-by: Kevin Wolf
(cherry picked from commit 384acbf46b70edf0d2c1648aa1a92a90bcf7057d)
---
 async.c            |   98 ++++------------------------------------------------
 block.c            |    6 ---
 block/qed-table.c  |   14 -------
 block/qed.c        |    4 --
 linux-aio.c        |   43 ++--------------------
 posix-aio-compat.c |   10 -----
 qemu-common.h      |    4 --
 7 files changed, 11 insertions(+), 168 deletions(-)

Signed-off-by: Michal Novotny
---
 async.c            |   98 ++++------------------------------------------------
 block.c            |    6 ---
 block/qed-table.c  |   14 -------
 block/qed.c        |    4 --
 linux-aio.c        |   43 ++--------------------
 posix-aio-compat.c |   10 -----
 qemu-common.h      |    4 --
 7 files changed, 11 insertions(+), 168 deletions(-)

diff --git a/async.c b/async.c
index fd313df..3fe70b9 100644
--- a/async.c
+++ b/async.c
@@ -25,92 +25,8 @@
 #include "qemu-common.h"
 #include "qemu-aio.h"
 
-/*
- * An AsyncContext protects the callbacks of AIO requests and Bottom Halves
- * against interfering with each other. A typical example is qcow2 that accepts
- * asynchronous requests, but relies for manipulation of its metadata on
- * synchronous bdrv_read/write that doesn't trigger any callbacks.
- *
- * However, these functions are often emulated using AIO which means that AIO
- * callbacks must be run - but at the same time we must not run callbacks of
- * other requests as they might start to modify metadata and corrupt the
- * internal state of the caller of bdrv_read/write.
- *
- * To achieve the desired semantics we switch into a new AsyncContext.
- * Callbacks must only be run if they belong to the current AsyncContext.
- * Otherwise they need to be queued until their own context is active again.
- * This is how you can make qemu_aio_wait() wait only for your own callbacks.
- *
- * The AsyncContexts form a stack. When you leave a AsyncContexts, you always
- * return to the old ("parent") context.
- */
-struct AsyncContext {
-    /* Consecutive number of the AsyncContext (position in the stack) */
-    int id;
-
-    /* Anchor of the list of Bottom Halves belonging to the context */
-    struct QEMUBH *first_bh;
-
-    /* Link to parent context */
-    struct AsyncContext *parent;
-};
-
-/* The currently active AsyncContext */
-static struct AsyncContext *async_context = &(struct AsyncContext) { 0 };
-
-/*
- * Enter a new AsyncContext. Already scheduled Bottom Halves and AIO callbacks
- * won't be called until this context is left again.
- */
-void async_context_push(void)
-{
-    struct AsyncContext *new = qemu_mallocz(sizeof(*new));
-    new->parent = async_context;
-    new->id = async_context->id + 1;
-    async_context = new;
-}
-
-/* Run queued AIO completions and destroy Bottom Half */
-static void bh_run_aio_completions(void *opaque)
-{
-    QEMUBH **bh = opaque;
-    qemu_bh_delete(*bh);
-    qemu_free(bh);
-    qemu_aio_process_queue();
-}
-/*
- * Leave the currently active AsyncContext. All Bottom Halves belonging to the
- * old context are executed before changing the context.
- */
-void async_context_pop(void)
-{
-    struct AsyncContext *old = async_context;
-    QEMUBH **bh;
-
-    /* Flush the bottom halves, we don't want to lose them */
-    while (qemu_bh_poll());
-
-    /* Switch back to the parent context */
-    async_context = async_context->parent;
-    qemu_free(old);
-
-    if (async_context == NULL) {
-        abort();
-    }
-
-    /* Schedule BH to run any queued AIO completions as soon as possible */
-    bh = qemu_malloc(sizeof(*bh));
-    *bh = qemu_bh_new(bh_run_aio_completions, bh);
-    qemu_bh_schedule(*bh);
-}
-
-/*
- * Returns the ID of the currently active AsyncContext
- */
-int get_async_context_id(void)
-{
-    return async_context->id;
-}
+/* Anchor of the list of Bottom Halves belonging to the context */
+static struct QEMUBH *first_bh;
 
 /***********************************************************/
 /* bottom halves (can be seen as timers which expire ASAP) */
@@ -130,8 +46,8 @@ QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
     bh = qemu_mallocz(sizeof(QEMUBH));
     bh->cb = cb;
     bh->opaque = opaque;
-    bh->next = async_context->first_bh;
-    async_context->first_bh = bh;
+    bh->next = first_bh;
+    first_bh = bh;
 
     return bh;
 }
@@ -141,7 +57,7 @@ int qemu_bh_poll(void)
     int ret;
 
     ret = 0;
-    for (bh = async_context->first_bh; bh; bh = next) {
+    for (bh = first_bh; bh; bh = next) {
         next = bh->next;
         if (!bh->deleted && bh->scheduled) {
             bh->scheduled = 0;
@@ -153,7 +69,7 @@ int qemu_bh_poll(void)
     }
 
     /* remove deleted bhs */
-    bhp = &async_context->first_bh;
+    bhp = &first_bh;
     while (*bhp) {
         bh = *bhp;
         if (bh->deleted) {
@@ -199,7 +115,7 @@ void qemu_bh_update_timeout(int *timeout)
 {
     QEMUBH *bh;
 
-    for (bh = async_context->first_bh; bh; bh = bh->next) {
+    for (bh = first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             if (bh->idle) {
                 /* idle bottom halves will be polled at least
diff --git a/block.c b/block.c
index 270ab41..ad19c57 100644
--- a/block.c
+++ b/block.c
@@ -2714,8 +2714,6 @@ static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
     struct iovec iov;
     QEMUIOVector qiov;
 
-    async_context_push();
-
     async_ret = NOT_DONE;
     iov.iov_base = (void *)buf;
     iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
@@ -2733,7 +2731,6 @@ static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
     }
 
 fail:
-    async_context_pop();
     return async_ret;
 }
 
@@ -2745,8 +2742,6 @@ static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
     struct iovec iov;
     QEMUIOVector qiov;
 
-    async_context_push();
-
     async_ret = NOT_DONE;
     iov.iov_base = (void *)buf;
     iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
@@ -2762,7 +2757,6 @@ static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
     }
 
 fail:
-    async_context_pop();
     return async_ret;
 }
 
diff --git a/block/qed-table.c b/block/qed-table.c
index d38c673..d96afa8 100644
--- a/block/qed-table.c
+++ b/block/qed-table.c
@@ -179,16 +179,12 @@ int qed_read_l1_table_sync(BDRVQEDState *s)
 {
     int ret = -EINPROGRESS;
 
-    async_context_push();
-
     qed_read_table(s, s->header.l1_table_offset, s->l1_table,
                    qed_sync_cb, &ret);
     while (ret == -EINPROGRESS) {
         qemu_aio_wait();
     }
 
-    async_context_pop();
-
     return ret;
 }
 
@@ -205,15 +201,11 @@ int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
 {
     int ret = -EINPROGRESS;
 
-    async_context_push();
-
     qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
     while (ret == -EINPROGRESS) {
         qemu_aio_wait();
     }
 
-    async_context_pop();
-
     return ret;
 }
 
@@ -282,14 +274,11 @@ int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset
 {
     int ret = -EINPROGRESS;
 
-    async_context_push();
-
     qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
     while (ret == -EINPROGRESS) {
         qemu_aio_wait();
     }
-    async_context_pop();
 
     return ret;
 }
 
@@ -307,13 +296,10 @@ int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
 {
     int ret = -EINPROGRESS;
 
-    async_context_push();
-
     qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
     while (ret == -EINPROGRESS) {
         qemu_aio_wait();
     }
-    async_context_pop();
 
     return ret;
 }
diff --git a/block/qed.c b/block/qed.c
index 1096d30..6a7faa3 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -673,16 +673,12 @@ static int bdrv_qed_is_allocated(BlockDriverState *bs, int64_t sector_num,
     };
     QEDRequest request = { .l2_table = NULL };
 
-    async_context_push();
-
     qed_find_cluster(s, &request, pos, len, qed_is_allocated_cb, &cb);
     while (cb.is_allocated == -1) {
         qemu_aio_wait();
     }
 
-    async_context_pop();
-
     qed_unref_l2_cache_entry(request.l2_table);
 
     return cb.is_allocated;
diff --git a/linux-aio.c b/linux-aio.c
index 5e892b0..1af4b49 100644
--- a/linux-aio.c
+++ b/linux-aio.c
@@ -31,7 +31,6 @@ struct qemu_laiocb {
     struct iocb iocb;
     ssize_t ret;
     size_t nbytes;
-    int async_context_id;
     QLIST_ENTRY(qemu_laiocb) node;
 };
 
@@ -39,7 +38,6 @@ struct qemu_laio_state {
     io_context_t ctx;
     int efd;
     int count;
-    QLIST_HEAD(, qemu_laiocb) completed_reqs;
 };
 
 static inline ssize_t io_event_ret(struct io_event *ev)
@@ -49,7 +47,6 @@ static inline ssize_t io_event_ret(struct io_event *ev)
 
 /*
  * Completes an AIO request (calls the callback and frees the ACB).
- * Be sure to be in the right AsyncContext before calling this function.
  */
 static void qemu_laio_process_completion(struct qemu_laio_state *s,
     struct qemu_laiocb *laiocb)
@@ -72,42 +69,12 @@ static void qemu_laio_process_completion(struct qemu_laio_state *s,
 }
 
 /*
- * Processes all queued AIO requests, i.e. requests that have return from OS
- * but their callback was not called yet. Requests that cannot have their
- * callback called in the current AsyncContext, remain in the queue.
- *
- * Returns 1 if at least one request could be completed, 0 otherwise.
+ * All requests are directly processed when they complete, so there's nothing
+ * left to do during qemu_aio_wait().
  */
 static int qemu_laio_process_requests(void *opaque)
 {
-    struct qemu_laio_state *s = opaque;
-    struct qemu_laiocb *laiocb, *next;
-    int res = 0;
-
-    QLIST_FOREACH_SAFE (laiocb, &s->completed_reqs, node, next) {
-        if (laiocb->async_context_id == get_async_context_id()) {
-            qemu_laio_process_completion(s, laiocb);
-            QLIST_REMOVE(laiocb, node);
-            res = 1;
-        }
-    }
-
-    return res;
-}
-
-/*
- * Puts a request in the completion queue so that its callback is called the
- * next time when it's possible. If we already are in the right AsyncContext,
- * the request is completed immediately instead.
- */
-static void qemu_laio_enqueue_completed(struct qemu_laio_state *s,
-    struct qemu_laiocb* laiocb)
-{
-    if (laiocb->async_context_id == get_async_context_id()) {
-        qemu_laio_process_completion(s, laiocb);
-    } else {
-        QLIST_INSERT_HEAD(&s->completed_reqs, laiocb, node);
-    }
+    return 0;
 }
 
 static void qemu_laio_completion_cb(void *opaque)
@@ -141,7 +108,7 @@ static void qemu_laio_completion_cb(void *opaque)
                 container_of(iocb, struct qemu_laiocb, iocb);
 
             laiocb->ret = io_event_ret(&events[i]);
-            qemu_laio_enqueue_completed(s, laiocb);
+            qemu_laio_process_completion(s, laiocb);
         }
     }
 }
@@ -204,7 +171,6 @@ BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
     laiocb->nbytes = nb_sectors * 512;
     laiocb->ctx = s;
     laiocb->ret = -EINPROGRESS;
-    laiocb->async_context_id = get_async_context_id();
 
     iocbs = &laiocb->iocb;
 
@@ -239,7 +205,6 @@ void *laio_init(void)
     struct qemu_laio_state *s;
 
     s = qemu_mallocz(sizeof(*s));
-    QLIST_INIT(&s->completed_reqs);
     s->efd = eventfd(0, 0);
     if (s->efd == -1)
         goto out_free_state;
diff --git a/posix-aio-compat.c b/posix-aio-compat.c
index 032fe1d..4b6d3ac 100644
--- a/posix-aio-compat.c
+++ b/posix-aio-compat.c
@@ -50,8 +50,6 @@ struct qemu_paiocb {
     ssize_t ret;
     int active;
     struct qemu_paiocb *next;
-
-    int async_context_id;
 };
 
 typedef struct PosixAioState {
@@ -423,7 +421,6 @@ static int posix_aio_process_queue(void *opaque)
     struct qemu_paiocb *acb, **pacb;
     int ret;
     int result = 0;
-    int async_context_id = get_async_context_id();
 
     for(;;) {
         pacb = &s->first_aio;
@@ -432,12 +429,6 @@ static int posix_aio_process_queue(void *opaque)
         if (!acb)
             return result;
 
-        /* we're only interested in requests in the right context */
-        if (acb->async_context_id != async_context_id) {
-            pacb = &acb->next;
-            continue;
-        }
-
         ret = qemu_paio_error(acb);
         if (ret == ECANCELED) {
             /* remove the request */
@@ -570,7 +561,6 @@ BlockDriverAIOCB *paio_submit(BlockDriverState *bs, int fd,
     acb->aio_type = type;
     acb->aio_fildes = fd;
     acb->ev_signo = SIGUSR2;
-    acb->async_context_id = get_async_context_id();
 
     if (qiov) {
         acb->aio_iov = qiov->iov;
diff --git a/qemu-common.h b/qemu-common.h
index 0aa319c..b93eeb7 100644
--- a/qemu-common.h
+++ b/qemu-common.h
@@ -104,10 +104,6 @@ typedef struct QEMUBH QEMUBH;
 typedef void QEMUBHFunc(void *opaque);
 
-void async_context_push(void);
-void async_context_pop(void);
-int get_async_context_id(void);
-
 QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque);
 void qemu_bh_schedule(QEMUBH *bh);
 /* Bottom halfs that are scheduled from a bottom half handler are instantly
-- 
1.7.7.5
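
For readers without the rest of the series at hand, the shape of the change
can be sketched as follows. This is an illustration only, not code from the
tree: BDRVExampleState and example_co_update_metadata() are made-up names,
while CoMutex, coroutine_fn and the qemu_co_mutex_*() functions are the real
coroutine primitives that qcow and qcow2 use (as s->lock) once this series
is applied.

/*
 * Illustrative sketch -- not part of the patch above.
 *
 * Old pattern (removed by this patch): wrap an emulated synchronous
 * request in its own AsyncContext so that qemu_aio_wait() runs only this
 * request's callbacks and no other request can re-enter the driver and
 * corrupt its metadata:
 *
 *     async_context_push();
 *     do_emulated_sync_io();      (calls qemu_aio_wait() internally)
 *     async_context_pop();
 *
 * New pattern: the request runs in a coroutine and serializes metadata
 * access with a CoMutex instead of suppressing foreign callbacks.
 */
#include "qemu-common.h"
#include "qemu-coroutine.h"

typedef struct BDRVExampleState {
    CoMutex lock;        /* set up once with qemu_co_mutex_init(&s->lock) */
    uint64_t *l1_table;  /* metadata shared between concurrent requests */
} BDRVExampleState;

static int coroutine_fn example_co_update_metadata(BDRVExampleState *s)
{
    qemu_co_mutex_lock(&s->lock);
    /*
     * Completions of other requests may still run while this coroutine
     * yields for I/O, but none of them can enter this critical section,
     * so s->l1_table cannot change under our feet.
     */
    /* ... read, modify and write s->l1_table here ... */
    qemu_co_mutex_unlock(&s->lock);
    return 0;
}

With a CoMutex providing the mutual exclusion, every AIO completion can
safely be dispatched as soon as it arrives, which is why
qemu_laio_process_requests() above collapses to "return 0" and the
completed_reqs queue disappears.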