block-backend: Queue requests while drained
This fixes devices like IDE that can still start new requests from I/O
handlers in the CPU thread while the block backend is drained.

The basic assumption is that in a drain section, no new requests should be
allowed through a BlockBackend (blk_drained_begin/end don't exist, we get
drain sections only on the node level). However, there are two special
cases where requests should not be queued:

1. Block jobs: We already make sure that block jobs are paused in a drain
   section, so they won't start new requests. However, if drain_begin is
   called on the job's BlockBackend first, we can deadlock because the job
   stays busy until it reaches a pause point - which it can't if its
   requests aren't processed any more.

   The proper solution here would be to make all requests go through the
   job's filter node instead of using a BlockBackend. For now, just
   disabling request queuing on the job's BlockBackend is simpler.

2. Test cases where making requests through bdrv_* would be cumbersome
   because we'd need a BdrvChild. Since we already have the functionality
   to disable request queuing from 1., use it in tests, too, for
   convenience.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Commit cf3129323f (parent d2da5e288a)
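For orientation before the diff, here is a minimal sketch of the queue-while-drained pattern this commit introduces. It assumes QEMU's coroutine primitives (CoQueue, qemu_co_queue_wait, qemu_co_enter_next, coroutine_fn), which are the same calls used in the hunks below; the struct name BlockBackendSketch and the sketch_* helper names are hypothetical stand-ins for the real BlockBackend fields and functions added by the patch, and locking/error handling are omitted.

#include "qemu/osdep.h"
#include "qemu/coroutine.h"

typedef struct BlockBackendSketch {
    int quiesce_counter;          /* > 0 while a drain section is active */
    CoQueue queued_requests;      /* request coroutines parked during drain */
    bool disable_request_queuing; /* set for block jobs and some tests */
} BlockBackendSketch;

/* Entry point of every request coroutine: park the request until the
 * drain section ends, unless queuing has been disabled for this backend. */
static void coroutine_fn sketch_wait_while_drained(BlockBackendSketch *blk)
{
    if (blk->quiesce_counter && !blk->disable_request_queuing) {
        qemu_co_queue_wait(&blk->queued_requests, NULL);
    }
}

/* Called when a drain section ends: once the last drain is gone,
 * restart every request that was parked while the backend was drained. */
static void sketch_drained_end(BlockBackendSketch *blk)
{
    assert(blk->quiesce_counter > 0);
    if (--blk->quiesce_counter == 0) {
        while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
            /* each iteration resumes one queued request coroutine */
        }
    }
}

Block jobs opt out of this queuing via blk_set_disable_request_queuing(blk, true) (see the block_job_create() hunk below), because a drained job must still get its in-flight requests processed to reach a pause point.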
@@ -644,6 +644,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     if (ret < 0) {
         goto error;
     }
+    blk_set_disable_request_queuing(job->target, true);
 
     job->on_source_error = on_source_error;
     job->on_target_error = on_target_error;
@@ -79,6 +79,9 @@ struct BlockBackend {
     QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
 
     int quiesce_counter;
+    CoQueue queued_requests;
+    bool disable_request_queuing;
+
     VMChangeStateEntry *vmsh;
     bool force_allow_inactivate;
 
@@ -339,6 +342,7 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
 
     block_acct_init(&blk->stats);
 
+    qemu_co_queue_init(&blk->queued_requests);
     notifier_list_init(&blk->remove_bs_notifiers);
     notifier_list_init(&blk->insert_bs_notifiers);
     QLIST_INIT(&blk->aio_notifiers);
@@ -1096,6 +1100,11 @@ void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
     blk->allow_aio_context_change = allow;
 }
 
+void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
+{
+    blk->disable_request_queuing = disable;
+}
+
 static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                   size_t size)
 {
@@ -1127,13 +1136,24 @@ static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
     return 0;
 }
 
+static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
+{
+    if (blk->quiesce_counter && !blk->disable_request_queuing) {
+        qemu_co_queue_wait(&blk->queued_requests, NULL);
+    }
+}
+
 int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
                                unsigned int bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
 {
     int ret;
-    BlockDriverState *bs = blk_bs(blk);
+    BlockDriverState *bs;
 
+    blk_wait_while_drained(blk);
+
+    /* Call blk_bs() only after waiting, the graph may have changed */
+    bs = blk_bs(blk);
     trace_blk_co_preadv(blk, bs, offset, bytes, flags);
 
     ret = blk_check_byte_request(blk, offset, bytes);
@@ -1159,8 +1179,12 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
                                 BdrvRequestFlags flags)
 {
     int ret;
-    BlockDriverState *bs = blk_bs(blk);
+    BlockDriverState *bs;
 
+    blk_wait_while_drained(blk);
+
+    /* Call blk_bs() only after waiting, the graph may have changed */
+    bs = blk_bs(blk);
     trace_blk_co_pwritev(blk, bs, offset, bytes, flags);
 
     ret = blk_check_byte_request(blk, offset, bytes);
@@ -1349,6 +1373,12 @@ static void blk_aio_read_entry(void *opaque)
     BlkRwCo *rwco = &acb->rwco;
     QEMUIOVector *qiov = rwco->iobuf;
 
+    if (rwco->blk->quiesce_counter) {
+        blk_dec_in_flight(rwco->blk);
+        blk_wait_while_drained(rwco->blk);
+        blk_inc_in_flight(rwco->blk);
+    }
+
     assert(qiov->size == acb->bytes);
     rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
                               qiov, rwco->flags);
@@ -1361,6 +1391,12 @@ static void blk_aio_write_entry(void *opaque)
     BlkRwCo *rwco = &acb->rwco;
     QEMUIOVector *qiov = rwco->iobuf;
 
+    if (rwco->blk->quiesce_counter) {
+        blk_dec_in_flight(rwco->blk);
+        blk_wait_while_drained(rwco->blk);
+        blk_inc_in_flight(rwco->blk);
+    }
+
     assert(!qiov || qiov->size == acb->bytes);
     rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
                                qiov, rwco->flags);
@@ -1482,6 +1518,8 @@ void blk_aio_cancel_async(BlockAIOCB *acb)
 
 int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
 {
+    blk_wait_while_drained(blk);
+
     if (!blk_is_available(blk)) {
         return -ENOMEDIUM;
     }
@@ -1522,7 +1560,11 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
 
 int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
 {
-    int ret = blk_check_byte_request(blk, offset, bytes);
+    int ret;
+
+    blk_wait_while_drained(blk);
+
+    ret = blk_check_byte_request(blk, offset, bytes);
     if (ret < 0) {
         return ret;
     }
@@ -1532,6 +1574,8 @@ int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
 
 int blk_co_flush(BlockBackend *blk)
 {
+    blk_wait_while_drained(blk);
+
     if (!blk_is_available(blk)) {
         return -ENOMEDIUM;
     }
@@ -2232,6 +2276,9 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
         if (blk->dev_ops && blk->dev_ops->drained_end) {
             blk->dev_ops->drained_end(blk->dev_opaque);
         }
+        while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
+            /* Resume all queued requests */
+        }
     }
 }
 
@@ -350,6 +350,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
     if (ret < 0) {
         goto fail;
     }
+    blk_set_disable_request_queuing(s->base, true);
     s->base_bs = base;
 
     /* Required permissions are already taken with block_job_add_bdrv() */
@@ -358,6 +359,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
     if (ret < 0) {
         goto fail;
     }
+    blk_set_disable_request_queuing(s->top, true);
 
     s->backing_file_str = g_strdup(backing_file_str);
     s->on_error = on_error;
@@ -1636,6 +1636,7 @@ static BlockJob *mirror_start_job(
         blk_set_force_allow_inactivate(s->target);
     }
     blk_set_allow_aio_context_change(s->target, true);
+    blk_set_disable_request_queuing(s->target, true);
 
     s->replaces = g_strdup(replaces);
     s->on_source_error = on_source_error;
@@ -445,6 +445,9 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
 
     bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
 
+    /* Disable request queuing in the BlockBackend to avoid deadlocks on drain:
+     * The job reports that it's busy until it reaches a pause point. */
+    blk_set_disable_request_queuing(blk, true);
     blk_set_allow_aio_context_change(blk, true);
 
     /* Only set speed when necessary to avoid NotSupported error */
@@ -104,6 +104,7 @@ void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);
 
 void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow);
 void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow);
+void blk_set_disable_request_queuing(BlockBackend *blk, bool disable);
 void blk_iostatus_enable(BlockBackend *blk);
 bool blk_iostatus_is_enabled(const BlockBackend *blk);
 BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk);
@@ -686,6 +686,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread)
                               &error_abort);
     s = bs->opaque;
     blk_insert_bs(blk, bs, &error_abort);
+    blk_set_disable_request_queuing(blk, true);
 
     blk_set_aio_context(blk, ctx_a, &error_abort);
     aio_context_acquire(ctx_a);