aio: Get rid of qemu_aio_flush()
There are no remaining users, and new users should probably be using bdrv_drain_all() in the first place.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
parent d318aea932
commit c57b6656c3
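For context, a minimal sketch of what this means for callers; the call site below is hypothetical and not part of this commit. Code that used to block on qemu_aio_flush() until all outstanding AIO completed now drains the block layer with bdrv_drain_all() instead.

/* Hypothetical caller, for illustration only.  bdrv_drain_all() is
 * declared in block.h; qemu_aio_flush() is the helper this commit removes. */
#include "block.h"

static void wait_for_outstanding_io(void)
{
    /* Before this commit a caller would block until all AIO finished with:
     *     qemu_aio_flush();
     * After this commit, the equivalent is to drain the block layer: */
    bdrv_drain_all();
}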
async.c (5 lines changed)

@@ -215,8 +215,3 @@ void aio_context_unref(AioContext *ctx)
 {
     g_source_unref(&ctx->source);
 }
-
-void aio_flush(AioContext *ctx)
-{
-    while (aio_poll(ctx, true));
-}
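As the hunk above shows, the removed helper was nothing more than a loop over aio_poll(). A rough sketch of the equivalent open-coded form, should a subsystem that owns an AioContext still want the old behaviour; the function name is made up for illustration.

/* Illustrative only: keep running blocking poll iterations until one of
 * them reports that no progress was made, i.e. nothing is pending.
 * aio_poll() is declared in the AIO header (qemu-aio.h at this point). */
static void drain_aio_context(AioContext *ctx)
{
    while (aio_poll(ctx, true)) {
        /* aio_poll() dispatched handlers; go around again. */
    }
}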
@@ -103,7 +103,7 @@ static void coroutine_fn commit_run(void *opaque)
 
 wait:
         /* Note that even when no rate limit is applied we need to yield
-         * with no pending I/O here so that qemu_aio_flush() returns.
+         * with no pending I/O here so that bdrv_drain_all() returns.
          */
         block_job_sleep_ns(&s->common, rt_clock, delay_ns);
         if (block_job_is_cancelled(&s->common)) {
@@ -205,7 +205,7 @@ static void coroutine_fn mirror_run(void *opaque)
         }
 
         /* Note that even when no rate limit is applied we need to yield
-         * with no pending I/O here so that qemu_aio_flush() returns.
+         * with no pending I/O here so that bdrv_drain_all() returns.
          */
         block_job_sleep_ns(&s->common, rt_clock, delay_ns);
         if (block_job_is_cancelled(&s->common)) {
@@ -108,7 +108,7 @@ static void coroutine_fn stream_run(void *opaque)
 
 wait:
         /* Note that even when no rate limit is applied we need to yield
-         * with no pending I/O here so that qemu_aio_flush() returns.
+         * with no pending I/O here so that bdrv_drain_all() returns.
          */
         block_job_sleep_ns(&s->common, rt_clock, delay_ns);
         if (block_job_is_cancelled(&s->common)) {
@@ -432,11 +432,6 @@ QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
     return aio_bh_new(qemu_aio_context, cb, opaque);
 }
 
-void qemu_aio_flush(void)
-{
-    aio_flush(qemu_aio_context);
-}
-
 bool qemu_aio_wait(void)
 {
     return aio_poll(qemu_aio_context, true);
@@ -162,10 +162,6 @@ void qemu_bh_cancel(QEMUBH *bh);
  */
 void qemu_bh_delete(QEMUBH *bh);
 
-/* Flush any pending AIO operation. This function will block until all
- * outstanding AIO operations have been completed or cancelled. */
-void aio_flush(AioContext *ctx);
-
 /* Return whether there are any pending callbacks from the GSource
  * attached to the AioContext.
  *
@@ -196,7 +192,7 @@ typedef int (AioFlushHandler)(void *opaque);
 
 /* Register a file descriptor and associated callbacks. Behaves very similarly
  * to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will
- * be invoked when using either qemu_aio_wait() or qemu_aio_flush().
+ * be invoked when using qemu_aio_wait().
  *
  * Code that invokes AIO completion functions should rely on this function
  * instead of qemu_set_fd_handler[2].
@@ -211,7 +207,7 @@ void aio_set_fd_handler(AioContext *ctx,
 
 /* Register an event notifier and associated callbacks. Behaves very similarly
  * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
- * will be invoked when using either qemu_aio_wait() or qemu_aio_flush().
+ * will be invoked when using qemu_aio_wait().
  *
  * Code that invokes AIO completion functions should rely on this function
  * instead of event_notifier_set_handler.
@@ -228,7 +224,6 @@ GSource *aio_get_g_source(AioContext *ctx);
 
 /* Functions to operate on the main QEMU AioContext. */
 
-void qemu_aio_flush(void);
 bool qemu_aio_wait(void);
 void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                  EventNotifierHandler *io_read,
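With qemu_aio_flush() gone from the public interface, code that must wait for one specific request (rather than drain everything) keeps the usual pattern of looping on qemu_aio_wait() until its own completion callback has run. A rough sketch, with a made-up callback and flag:

/* Illustrative only: "done" and my_request_cb() are hypothetical;
 * qemu_aio_wait() is the public helper kept by this commit. */
static bool done;

static void my_request_cb(void *opaque, int ret)
{
    done = true;    /* completion callback, invoked from qemu_aio_wait() */
}

static void wait_for_request(void)
{
    while (!done) {
        qemu_aio_wait();    /* dispatch pending AIO completions, blocking */
    }
}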