From d1a9cbefe7e875456e26a290a612caadd2db6440 Mon Sep 17 00:00:00 2001
From: Jeffrey Cody
Date: Wed, 21 Mar 2012 21:55:01 +0100
Subject: [PATCH 34/55] block: convert qemu_aio_flush() calls to bdrv_drain_all()

RH-Author: Jeffrey Cody
Message-id: <1572fd75658a8a4b426093b5d501b1eccff865a5.1332362400.git.jcody@redhat.com>
Patchwork-id: 38883
O-Subject: [RHEL6.3 qemu-kvm PATCH v8 34/54] block: convert qemu_aio_flush() calls to bdrv_drain_all()
Bugzilla: 582475
RH-Acked-by: Paolo Bonzini
RH-Acked-by: Marcelo Tosatti
RH-Acked-by: Kevin Wolf

From: Stefan Hajnoczi

Many places in QEMU call qemu_aio_flush() to complete all pending
asynchronous I/O.  Most of these places actually want to drain all block
requests but there is no block layer API to do so.

This patch introduces the bdrv_drain_all() API to wait for requests
across all BlockDriverStates to complete.  As a bonus we perform checks
after qemu_aio_wait() to ensure that requests really have finished.

Signed-off-by: Stefan Hajnoczi
Signed-off-by: Kevin Wolf
(cherry picked from commit 922453bca6a927bb527068ae8679d587cfa45dbc)
Signed-off-by: Stefan Hajnoczi
Signed-off-by: Anthony Liguori
Signed-off-by: Jeff Cody
---
 block.c         |   18 ++++++++++++++++++
 block.h         |    1 +
 blockdev.c      |    4 ++--
 hw/ide/macio.c  |    5 +++--
 hw/ide/pci.c    |    2 +-
 hw/virtio-blk.c |    2 +-
 migration.c     |    2 +-
 qemu-io.c       |    4 ++--
 savevm.c        |    4 ++--
 9 files changed, 31 insertions(+), 11 deletions(-)

Signed-off-by: Michal Novotny
---
 block.c         |   18 ++++++++++++++++++
 block.h         |    1 +
 blockdev.c      |    4 ++--
 hw/ide/macio.c  |    5 +++--
 hw/ide/pci.c    |    2 +-
 hw/virtio-blk.c |    2 +-
 migration.c     |    2 +-
 qemu-io.c       |    4 ++--
 savevm.c        |    4 ++--
 9 files changed, 31 insertions(+), 11 deletions(-)

diff --git a/block.c b/block.c
index 1cb1a51..eeef65e 100644
--- a/block.c
+++ b/block.c
@@ -711,6 +711,24 @@ void bdrv_close_all(void)
     }
 }
 
+/*
+ * Wait for pending requests to complete across all BlockDriverStates
+ *
+ * This function does not flush data to disk, use bdrv_flush_all() for that
+ * after calling this function.
+ */
+void bdrv_drain_all(void)
+{
+    BlockDriverState *bs;
+
+    qemu_aio_flush();
+
+    /* If requests are still pending there is a bug somewhere */
+    QTAILQ_FOREACH(bs, &bdrv_states, list) {
+        assert(QLIST_EMPTY(&bs->tracked_requests));
+    }
+}
+
 /* make a BlockDriverState anonymous by removing from bdrv_state list.
    Also, NULL terminate the device_name to prevent double remove */
 void bdrv_make_anon(BlockDriverState *bs)
diff --git a/block.h b/block.h
index 9b4a3a5..e935392 100644
--- a/block.h
+++ b/block.h
@@ -212,6 +212,7 @@ int bdrv_flush(BlockDriverState *bs);
 int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
 void bdrv_flush_all(void);
 void bdrv_close_all(void);
+void bdrv_drain_all(void);
 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
 int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
     int nb_sectors);
diff --git a/blockdev.c b/blockdev.c
index d84f401..1882a71 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -676,7 +676,7 @@ int do_snapshot_blkdev(Monitor *mon, const QDict *qdict, QObject **ret_data)
         goto out;
     }
 
-    qemu_aio_flush();
+    bdrv_drain_all();
     bdrv_flush(bs);
 
     bdrv_close(bs);
@@ -936,7 +936,7 @@ int do_drive_del(Monitor *mon, const QDict *qdict, QObject **ret_data)
     }
 
     /* quiesce block driver; prevent further io */
-    qemu_aio_flush();
+    bdrv_drain_all();
     bdrv_flush(bs);
     bdrv_close(bs);
 
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index a9268cc..a0829a6 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -189,8 +189,9 @@ static void pmac_ide_flush(DBDMA_io *io)
 {
     MACIOIDEState *m = io->opaque;
 
-    if (m->aiocb)
-        qemu_aio_flush();
+    if (m->aiocb) {
+        bdrv_drain_all();
+    }
 }
 
 /* PowerMac IDE memory IO */
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index 7f61ada..3da0d1d 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -53,7 +53,7 @@ void bmdma_cmd_writeb(void *opaque, uint32_t addr, uint32_t val)
          * aio operation with preadv/pwritev.
          */
         if (bm->aiocb) {
-            qemu_aio_flush();
+            bdrv_drain_all();
             if (bm->aiocb)
                 printf("ide_dma_cancel: aiocb still pending\n");
             if (bm->status & BM_STATUS_DMAING)
diff --git a/hw/virtio-blk.c b/hw/virtio-blk.c
index 05209c6..218bd7b 100644
--- a/hw/virtio-blk.c
+++ b/hw/virtio-blk.c
@@ -475,7 +475,7 @@ static void virtio_blk_reset(VirtIODevice *vdev)
      * This should cancel pending requests, but can't do nicely until there
      * are per-device request lists.
      */
-    qemu_aio_flush();
+    bdrv_drain_all();
 }
 
 /* coalesce internal state, copy to pci i/o region 0
diff --git a/migration.c b/migration.c
index 81f79fc..2b99252 100644
--- a/migration.c
+++ b/migration.c
@@ -400,7 +400,7 @@ void migrate_fd_put_ready(void *opaque)
         qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
 
         vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
-        qemu_aio_flush();
+        bdrv_drain_all();
         bdrv_flush_all();
         if ((qemu_savevm_state_complete(s->mon, s->file)) < 0) {
             if (old_vm_running) {
diff --git a/qemu-io.c b/qemu-io.c
index f82f209..2b0e4a9 100644
--- a/qemu-io.c
+++ b/qemu-io.c
@@ -1523,9 +1523,9 @@ int main(int argc, char **argv)
     command_loop();
 
     /*
-     * Make sure all outstanding requests get flushed the program exits.
+     * Make sure all outstanding requests complete before the program exits.
      */
-    qemu_aio_flush();
+    bdrv_drain_all();
 
     if (bs) {
         bdrv_close(bs);
diff --git a/savevm.c b/savevm.c
index db43dab..22cfffd 100644
--- a/savevm.c
+++ b/savevm.c
@@ -1968,7 +1968,7 @@ void do_savevm(Monitor *mon, const QDict *qdict)
         return;
     }
     /* ??? Should this occur after vm_stop? */
-    qemu_aio_flush();
+    bdrv_drain_all();
 
     saved_vm_running = runstate_is_running();
     vm_stop(RUN_STATE_SAVE_VM);
@@ -2064,7 +2064,7 @@ int load_vmstate(const char *name)
     }
 
     /* Flush all IO requests so they don't interfere with the new state. */
-    qemu_aio_flush();
+    bdrv_drain_all();
 
     bs1 = NULL;
     while ((bs1 = bdrv_next(bs1))) {
-- 
1.7.7.6
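
Editor's note (not part of the patch): the hunks above consistently replace
qemu_aio_flush() with the new bdrv_drain_all() at call sites that want to
quiesce block I/O before flushing or closing an image, as blockdev.c does in
do_snapshot_blkdev() and do_drive_del(). A minimal sketch of that calling
convention follows; the quiesce_and_close() helper name is hypothetical and
only illustrates the drain-then-flush-then-close ordering, assuming the
block.h declarations introduced and used by this patch.

    #include "block.h"   /* bdrv_drain_all(), bdrv_flush(), bdrv_close() */

    /* Hypothetical helper showing the pattern used in blockdev.c above.
     * bdrv_drain_all() only waits for in-flight requests to complete; it
     * does not write data out, which is why bdrv_flush() still follows it. */
    static void quiesce_and_close(BlockDriverState *bs)
    {
        bdrv_drain_all();   /* wait for pending requests on every BDS */
        bdrv_flush(bs);     /* push completed writes to the image file */
        bdrv_close(bs);     /* safe now: no request can still touch bs */
    }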