Mirror of https://github.com/xemu-project/xemu.git
cfe29d8294
When draining a block node, we recurse to its parent and for subtree
drains also to its children. A single AIO_WAIT_WHILE() is then used to
wait for bdrv_drain_poll() to become true, which depends on all of the
nodes we recursed to. However, if the respective child or parent becomes
quiescent and calls bdrv_wakeup(), only the AioWait of the child/parent
is checked, while AIO_WAIT_WHILE() depends on the AioWait of the
original node.

Fix this by using a single AioWait for all callers of AIO_WAIT_WHILE().

This may mean that the draining thread gets a few more unnecessary
wakeups because an unrelated operation got completed, but we already
wake it up when something _could_ have changed rather than only if it
has certainly changed. Apart from that, drain is a slow path anyway.

In theory it would be possible to use wakeups more selectively and
still correctly, but the gains are likely not worth the additional
complexity. In fact, this patch is a nice simplification for some
places in the code.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
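For readers unfamiliar with the mechanism, the following is a minimal,
self-contained sketch of the wait/kick handshake that AIO_WAIT_WHILE()
and aio_wait_kick() implement once every waiter shares a single
AioWait. All *_demo names are invented for illustration, and the direct
call to complete_request_demo() stands in for blocking in aio_poll();
this is not QEMU's actual macro expansion.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One global wait object for every waiter, like global_aio_wait. */
static atomic_int num_waiters_demo;

/* The condition a waiter blocks on, e.g. "node still has requests". */
static atomic_bool busy_demo = true;

/* Completer side, like aio_wait_kick(): after the tracked state has
 * changed, wake any waiter so it re-evaluates its condition. */
static void kick_demo(void)
{
    if (atomic_load(&num_waiters_demo) > 0) {
        /* QEMU schedules a dummy BH here so aio_poll() returns. */
        printf("kick: a waiter will re-check its condition\n");
    }
}

/* An operation finishing: update the state first, then kick. */
static void complete_request_demo(void)
{
    atomic_store(&busy_demo, false);
    kick_demo();
}

/* Waiter side, like AIO_WAIT_WHILE(ctx, cond): register as a waiter,
 * poll until the condition clears, then deregister. */
static void wait_while_busy_demo(void)
{
    atomic_fetch_add(&num_waiters_demo, 1);
    while (atomic_load(&busy_demo)) {
        /* QEMU blocks in aio_poll(ctx, true) here; the sketch just
         * processes its one pending "event" directly. */
        complete_request_demo();
    }
    atomic_fetch_sub(&num_waiters_demo, 1);
}

int main(void)
{
    wait_while_busy_demo();
    printf("drained\n");
    return 0;
}

The key property is that the completer never needs to know which node a
waiter is draining: it checks the single global waiter count and kicks
unconditionally, which is exactly the simplification this commit makes.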
73 lines
2.1 KiB
C
/*
 * AioContext wait support
 *
 * Copyright (C) 2018 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "block/aio-wait.h"

AioWait global_aio_wait;
static void dummy_bh_cb(void *opaque)
{
    /* The point is to make AIO_WAIT_WHILE()'s aio_poll() return */
}
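/* Schedule a dummy BH in the main loop if anyone is blocked in
 * AIO_WAIT_WHILE(), so that its aio_poll() returns and the waited-on
 * condition is re-evaluated. */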
void aio_wait_kick(void)
{
    /* The barrier (or an atomic op) is in the caller. */
    if (atomic_read(&global_aio_wait.num_waiters)) {
        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
    }
}
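/* State shared between aio_wait_bh_oneshot() and its bottom half. */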
typedef struct {
    bool done;
    QEMUBHFunc *cb;
    void *opaque;
} AioWaitBHData;
/* Context: BH in IOThread */
static void aio_wait_bh(void *opaque)
{
    AioWaitBHData *data = opaque;

    data->cb(data->opaque);

    data->done = true;
    aio_wait_kick();
}
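/* Run cb(opaque) as a one-shot BH in ctx and wait for it to finish;
 * must be called from the main loop, as the assert below checks. */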
void aio_wait_bh_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    AioWaitBHData data = {
        .cb = cb,
        .opaque = opaque,
    };

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());

    aio_bh_schedule_oneshot(ctx, aio_wait_bh, &data);
    AIO_WAIT_WHILE(ctx, !data.done);
}
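As a closing illustration, a hypothetical caller of
aio_wait_bh_oneshot() might look like the sketch below: the main loop
hands work to an iothread's AioContext and blocks until the BH has run.
The *_demo names are invented for this example, which assumes the same
headers as the file above.

/* Hypothetical usage sketch; *_demo names are invented. */
static void stop_dataplane_demo(void *opaque)
{
    /* Runs as a bottom half in the iothread's context; tear down
     * state that may only be touched from that thread. */
}

static void device_stop_demo(AioContext *iothread_ctx)
{
    /* Must run in the main loop thread, per the assert in
     * aio_wait_bh_oneshot(); blocks until the BH has finished. */
    aio_wait_bh_oneshot(iothread_ctx, stop_dataplane_demo, NULL);
}

Because data lives on the caller's stack, aio_wait_bh_oneshot() must
not return before the BH has run; the AIO_WAIT_WHILE() on data.done is
what guarantees that.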