mirror of
https://github.com/xemu-project/xemu.git
synced 2024-11-24 12:09:58 +00:00
dirty-bitmap: improve bdrv_dirty_bitmap_next_zero

Add bytes parameter to the function, to limit searched range.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
This commit is contained in:
parent
4b9f0b0f7c
commit
76d570dc49
@@ -422,7 +422,8 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
             break;
         }

-        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset);
+        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset,
+                                             UINT64_MAX);
         if (offset == -1) {
             hbitmap_set(job->copy_bitmap, cluster, end - cluster);
             break;
@@ -781,9 +781,10 @@ char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp)
     return hbitmap_sha256(bitmap->bitmap, errp);
 }

-int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset)
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset,
+                                    uint64_t bytes)
 {
-    return hbitmap_next_zero(bitmap->bitmap, offset);
+    return hbitmap_next_zero(bitmap->bitmap, offset, bytes);
 }

 void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
@@ -99,7 +99,8 @@ bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs);
 BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BlockDriverState *bs,
                                         BdrvDirtyBitmap *bitmap);
 char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
-int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t start);
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset,
+                                    uint64_t bytes);
 BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BlockDriverState *bs,
                                                   BdrvDirtyBitmap *bitmap,
                                                   Error **errp);
@@ -300,12 +300,16 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first);
 unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);

 /* hbitmap_next_zero:
+ *
+ * Find next not dirty bit within selected range. If not found, return -1.
  *
  * @hb: The HBitmap to operate on
  * @start: The bit to start from.
- *
- * Find next not dirty bit.
+ * @count: Number of bits to proceed. If @start+@count > bitmap size, the whole
+ * bitmap is looked through. You can use UINT64_MAX as @count to search up to
+ * the bitmap end.
  */
-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start);
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count);

 /* hbitmap_create_meta:
  * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
@@ -2014,7 +2014,7 @@ static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset,
         bool next_dirty = !dirty;

         if (dirty) {
-            end = bdrv_dirty_bitmap_next_zero(bitmap, begin);
+            end = bdrv_dirty_bitmap_next_zero(bitmap, begin, UINT64_MAX);
         } else {
             bdrv_set_dirty_iter(it, begin);
             end = bdrv_dirty_iter_next(it);
@@ -939,7 +939,7 @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data,

 static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start)
 {
-    int64_t ret1 = hbitmap_next_zero(data->hb, start);
+    int64_t ret1 = hbitmap_next_zero(data->hb, start, UINT64_MAX);
     int64_t ret2 = start;
     for ( ; ret2 < data->size && hbitmap_get(data->hb, ret2); ret2++) {
         ;
@@ -53,6 +53,9 @@
  */

 struct HBitmap {
+    /* Size of the bitmap, as requested in hbitmap_alloc. */
+    uint64_t orig_size;
+
     /* Number of total bits in the bottom level.  */
     uint64_t size;

@@ -192,16 +195,28 @@ void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
     }
 }

-int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count)
 {
     size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
     unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
-    uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
     unsigned long cur = last_lev[pos];
-    unsigned start_bit_offset =
-            (start >> hb->granularity) & (BITS_PER_LONG - 1);
+    unsigned start_bit_offset;
+    uint64_t end_bit, sz;
+    int64_t res;
+
+    if (start >= hb->orig_size || count == 0) {
+        return -1;
+    }
+
+    end_bit = count > hb->orig_size - start ?
+                hb->size :
+                ((start + count - 1) >> hb->granularity) + 1;
+    sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;

     /* There may be some zero bits in @cur before @start. We are not interested
      * in them, let's set them.
      */
+    start_bit_offset = (start >> hb->granularity) & (BITS_PER_LONG - 1);
     cur |= (1UL << start_bit_offset) - 1;
     assert((start >> hb->granularity) < hb->size);

@@ -218,7 +233,7 @@ int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
     }

     res = (pos << BITS_PER_LEVEL) + ctol(cur);
-    if (res >= hb->size) {
+    if (res >= end_bit) {
         return -1;
     }

||||
@ -652,6 +667,8 @@ HBitmap *hbitmap_alloc(uint64_t size, int granularity)
|
||||
HBitmap *hb = g_new0(struct HBitmap, 1);
|
||||
unsigned i;
|
||||
|
||||
hb->orig_size = size;
|
||||
|
||||
assert(granularity >= 0 && granularity < 64);
|
||||
size = (size + (1ULL << granularity) - 1) >> granularity;
|
||||
assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
|
||||
|
Loading…
Reference in New Issue
Block a user