block/block-copy: reduce intersecting request lock
Currently, a block_copy operation locks the whole requested region. But there is no reason to lock clusters that are already copied: doing so only disturbs other parallel block_copy requests for no reason.

Let's instead do the following: lock only the sub-region we are going to operate on. Then, after copying all dirty sub-regions, wait for intersecting block-copy requests; if they failed, retry the newly dirtied clusters.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
Message-Id: <20200311103004.7649-9-vsementsov@virtuozzo.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
commit 5332e5d210 (parent 8719091f9d)
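In outline, the patch replaces the single whole-range lock with per-chunk in-flight requests plus a top-level retry loop. As a reading aid, here is the new block_copy() entry point condensed from the diff below; the comments are editorial, not part of the patch:

/*
 * Condensed shape of the new top-level loop (simplified; see the full
 * diff below for the real bookkeeping).
 */
int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                            bool *error_is_read)
{
    int ret;

    do {
        /* Copy dirty clusters, locking only the chunk currently worked on. */
        ret = block_copy_dirty_clusters(s, offset, bytes, error_is_read);

        /*
         * Nothing left for us to copy: wait for one intersecting in-flight
         * request, if any. If it fails, it re-dirties its clusters, so we
         * loop and pick them up.
         */
        if (ret == 0) {
            ret = block_copy_wait_one(s, offset, bytes);
        }
    } while (ret > 0); /* made progress or waited: re-check dirty bitmap */

    return ret;
}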
@@ -39,29 +39,72 @@ static BlockCopyInFlightReq *find_conflicting_inflight_req(BlockCopyState *s,
     return NULL;
 }
 
-static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
-                                                       int64_t offset,
-                                                       int64_t bytes)
+/*
+ * If there are no intersecting requests return false. Otherwise, wait for the
+ * first found intersecting request to finish and return true.
+ */
+static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
+                                             int64_t bytes)
 {
-    BlockCopyInFlightReq *req;
+    BlockCopyInFlightReq *req = find_conflicting_inflight_req(s, offset, bytes);
 
-    while ((req = find_conflicting_inflight_req(s, offset, bytes))) {
-        qemu_co_queue_wait(&req->wait_queue, NULL);
+    if (!req) {
+        return false;
     }
+
+    qemu_co_queue_wait(&req->wait_queue, NULL);
+
+    return true;
 }
 
+/* Called only on full-dirty region */
 static void block_copy_inflight_req_begin(BlockCopyState *s,
                                           BlockCopyInFlightReq *req,
                                           int64_t offset, int64_t bytes)
 {
+    assert(!find_conflicting_inflight_req(s, offset, bytes));
+
+    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
+    s->in_flight_bytes += bytes;
+
     req->offset = offset;
     req->bytes = bytes;
     qemu_co_queue_init(&req->wait_queue);
     QLIST_INSERT_HEAD(&s->inflight_reqs, req, list);
 }
 
-static void coroutine_fn block_copy_inflight_req_end(BlockCopyInFlightReq *req)
+/*
+ * block_copy_inflight_req_shrink
+ *
+ * Drop the tail of the request to be handled later. Set dirty bits back and
+ * wake up all requests waiting for us (may be some of them are not intersecting
+ * with shrunk request)
+ */
+static void coroutine_fn block_copy_inflight_req_shrink(BlockCopyState *s,
+        BlockCopyInFlightReq *req, int64_t new_bytes)
 {
+    if (new_bytes == req->bytes) {
+        return;
+    }
+
+    assert(new_bytes > 0 && new_bytes < req->bytes);
+
+    s->in_flight_bytes -= req->bytes - new_bytes;
+    bdrv_set_dirty_bitmap(s->copy_bitmap,
+                          req->offset + new_bytes, req->bytes - new_bytes);
+
+    req->bytes = new_bytes;
+    qemu_co_queue_restart_all(&req->wait_queue);
+}
+
+static void coroutine_fn block_copy_inflight_req_end(BlockCopyState *s,
+                                                     BlockCopyInFlightReq *req,
+                                                     int ret)
+{
+    s->in_flight_bytes -= req->bytes;
+    if (ret < 0) {
+        bdrv_set_dirty_bitmap(s->copy_bitmap, req->offset, req->bytes);
+    }
     QLIST_REMOVE(req, list);
     qemu_co_queue_restart_all(&req->wait_queue);
 }
@@ -357,12 +400,19 @@ int64_t block_copy_reset_unallocated(BlockCopyState *s,
     return ret;
 }
 
-int coroutine_fn block_copy(BlockCopyState *s,
-                            int64_t offset, int64_t bytes,
-                            bool *error_is_read)
+/*
+ * block_copy_dirty_clusters
+ *
+ * Copy dirty clusters in @offset/@bytes range.
+ * Returns 1 if dirty clusters found and successfully copied, 0 if no dirty
+ * clusters found and -errno on failure.
+ */
+static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
+                                                  int64_t offset, int64_t bytes,
+                                                  bool *error_is_read)
 {
     int ret = 0;
-    BlockCopyInFlightReq req;
+    bool found_dirty = false;
 
     /*
      * block_copy() user is responsible for keeping source and target in same
@@ -374,10 +424,8 @@ int coroutine_fn block_copy(BlockCopyState *s,
     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
     assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
 
-    block_copy_wait_inflight_reqs(s, offset, bytes);
-    block_copy_inflight_req_begin(s, &req, offset, bytes);
-
     while (bytes) {
+        BlockCopyInFlightReq req;
         int64_t next_zero, cur_bytes, status_bytes;
 
         if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
@@ -387,6 +435,8 @@ int coroutine_fn block_copy(BlockCopyState *s,
             continue; /* already copied */
         }
 
+        found_dirty = true;
+
         cur_bytes = MIN(bytes, s->copy_size);
 
         next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, offset,
@@ -396,10 +446,14 @@ int coroutine_fn block_copy(BlockCopyState *s,
             assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
             cur_bytes = next_zero - offset;
         }
+        block_copy_inflight_req_begin(s, &req, offset, cur_bytes);
 
         ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
+        assert(ret >= 0); /* never fail */
+        cur_bytes = MIN(cur_bytes, status_bytes);
+        block_copy_inflight_req_shrink(s, &req, cur_bytes);
         if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
             bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, status_bytes);
+            block_copy_inflight_req_end(s, &req, 0);
             progress_set_remaining(s->progress,
                                    bdrv_get_dirty_count(s->copy_bitmap) +
                                    s->in_flight_bytes);
@@ -409,21 +463,15 @@ int coroutine_fn block_copy(BlockCopyState *s,
             continue;
         }
 
-        cur_bytes = MIN(cur_bytes, status_bytes);
-
         trace_block_copy_process(s, offset);
 
-        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, cur_bytes);
-        s->in_flight_bytes += cur_bytes;
-
         co_get_from_shres(s->mem, cur_bytes);
         ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
                                  error_is_read);
         co_put_to_shres(s->mem, cur_bytes);
-        s->in_flight_bytes -= cur_bytes;
+        block_copy_inflight_req_end(s, &req, ret);
         if (ret < 0) {
-            bdrv_set_dirty_bitmap(s->copy_bitmap, offset, cur_bytes);
-            break;
+            return ret;
         }
 
         progress_work_done(s->progress, cur_bytes);
@@ -432,7 +480,40 @@ int coroutine_fn block_copy(BlockCopyState *s,
         bytes -= cur_bytes;
     }
 
-    block_copy_inflight_req_end(&req);
+    return found_dirty;
+}
+
+/*
+ * block_copy
+ *
+ * Copy requested region, accordingly to dirty bitmap.
+ * Collaborate with parallel block_copy requests: if they succeed it will help
+ * us. If they fail, we will retry not-copied regions. So, if we return error,
+ * it means that some I/O operation failed in context of _this_ block_copy call,
+ * not some parallel operation.
+ */
+int coroutine_fn block_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
+                            bool *error_is_read)
+{
+    int ret;
+
+    do {
+        ret = block_copy_dirty_clusters(s, offset, bytes, error_is_read);
+
+        if (ret == 0) {
+            ret = block_copy_wait_one(s, offset, bytes);
+        }
+
+        /*
+         * We retry in two cases:
+         * 1. Some progress done
+         *    Something was copied, which means that there were yield points
+         *    and some new dirty bits may have appeared (due to failed parallel
+         *    block-copy requests).
+         * 2. We have waited for some intersecting block-copy request
+         *    It may have failed and produced new dirty bits.
+         */
+    } while (ret > 0);
 
     return ret;
 }