migration: Use non-atomic ops for clear log bitmap
Since we already have bitmap_mutex to protect either the dirty bitmap or the
clear log bitmap, we don't need atomic operations to set/clear/test on the
clear log bitmap. Switch all ops from atomic to non-atomic versions, and
meanwhile touch up the comments to show which lock is in charge.

Introduce a non-atomic version of bitmap_test_and_clear_atomic(), mostly the
same as the atomic version but simplified in a few places, e.g. the
"old_bits" variable is dropped, and so are the explicit memory barriers.

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
This commit is contained in:
parent afed4273b5
commit cedb70eafb
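To make the locking claim in the message concrete, here is a minimal sketch of the producer side: marking the clear bitmap while the global dirty sync holds bitmap_mutex. The RAMState/bitmap_mutex names follow the comments touched by this commit; the helper name and its call site are hypothetical illustrations, not code from this change.

/* Hypothetical sketch, assuming a RAMState that owns the QemuMutex
 * bitmap_mutex described in this commit.  With the mutex held, the plain
 * bitmap_set()-backed clear_bmap_set() is sufficient; no atomics needed. */
static void mark_clear_bmap_at_sync(RAMState *rs, RAMBlock *rb,
                                    uint64_t start, uint64_t npages)
{
    qemu_mutex_lock(&rs->bitmap_mutex);
    /* Only done during global dirty bitmap sync on the migration src. */
    clear_bmap_set(rb, start, npages);
    qemu_mutex_unlock(&rs->bitmap_mutex);
}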
@@ -42,7 +42,8 @@ static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
 }
 
 /**
- * clear_bmap_set: set clear bitmap for the page range
+ * clear_bmap_set: set clear bitmap for the page range. Must be with
+ * bitmap_mutex held.
  *
  * @rb: the ramblock to operate on
  * @start: the start page number
@@ -55,12 +56,12 @@ static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
 {
     uint8_t shift = rb->clear_bmap_shift;
 
-    bitmap_set_atomic(rb->clear_bmap, start >> shift,
-                      clear_bmap_size(npages, shift));
+    bitmap_set(rb->clear_bmap, start >> shift, clear_bmap_size(npages, shift));
 }
 
 /**
- * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
+ * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set.
+ * Must be with bitmap_mutex held.
  *
  * @rb: the ramblock to operate on
  * @page: the page number to check
@@ -71,7 +72,7 @@ static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
 {
     uint8_t shift = rb->clear_bmap_shift;
 
-    return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
+    return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
 }
 
 static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
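The doc-comments above now require bitmap_mutex across both helpers. A hedged sketch of the consumer side, assuming the same hypothetical RAMState lock as in the earlier sketch; the helper name and the KVM remark are illustrative assumptions, not code from this diff.

/* Hypothetical consumer sketch: before sending a page, test-and-clear the
 * coarse clear-bitmap bit that covers it.  Holding bitmap_mutex makes the
 * non-atomic bitmap_test_and_clear() underneath race-free. */
static bool page_needs_remote_clear(RAMState *rs, RAMBlock *rb, uint64_t page)
{
    bool ret;

    qemu_mutex_lock(&rs->bitmap_mutex);
    ret = clear_bmap_test_and_clear(rb, page);
    qemu_mutex_unlock(&rs->bitmap_mutex);

    /* If ret is true, the caller would then ask the remote side (e.g. KVM)
     * to clear its dirty log for the whole range the bit covers. */
    return ret;
}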
@@ -53,6 +53,9 @@ struct RAMBlock {
      * and split clearing of dirty bitmap on the remote node (e.g.,
      * KVM). The bitmap will be set only when doing global sync.
      *
+     * It is only used during src side of ram migration, and it is
+     * protected by the global ram_state.bitmap_mutex.
+     *
      * NOTE: this bitmap is different comparing to the other bitmaps
      * in that one bit can represent multiple guest pages (which is
      * decided by the `clear_bmap_shift' variable below). On
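The note about one clear-bitmap bit covering many guest pages is easiest to see with numbers. The self-contained snippet below assumes a granularity shift of 18 (an assumption for illustration, not a value taken from this diff) and reimplements the round-up division locally so it compiles on its own.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for clear_bmap_size(): number of clear-bitmap bits
 * needed to cover `pages` guest pages at 2^shift pages per bit. */
static long clear_bmap_bits(uint64_t pages, uint8_t shift)
{
    return (pages + (1ULL << shift) - 1) >> shift;
}

int main(void)
{
    uint8_t shift = 18;         /* assumed granularity: 2^18 pages per bit */
    uint64_t page = 300000;     /* an arbitrary guest page number          */

    /* Page 300000 lands in clear-bitmap bit 300000 >> 18 == 1. */
    printf("bit index for page %" PRIu64 ": %" PRIu64 "\n",
           page, page >> shift);

    /* 2^20 pages (4GB worth of 4K pages) need ceil(2^20 / 2^18) == 4 bits. */
    printf("bits for 1M pages: %ld\n", clear_bmap_bits(1ULL << 20, shift));
    return 0;
}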
@@ -253,6 +253,7 @@ void bitmap_set(unsigned long *map, long i, long len);
 void bitmap_set_atomic(unsigned long *map, long i, long len);
 void bitmap_clear(unsigned long *map, long start, long nr);
 bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr);
+bool bitmap_test_and_clear(unsigned long *map, long start, long nr);
 void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
                                   long nr);
 unsigned long bitmap_find_next_zero_area(unsigned long *map,
@@ -240,6 +240,51 @@ void bitmap_clear(unsigned long *map, long start, long nr)
     }
 }
 
+bool bitmap_test_and_clear(unsigned long *map, long start, long nr)
+{
+    unsigned long *p = map + BIT_WORD(start);
+    const long size = start + nr;
+    int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+    unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+    bool dirty = false;
+
+    assert(start >= 0 && nr >= 0);
+
+    /* First word */
+    if (nr - bits_to_clear > 0) {
+        if ((*p) & mask_to_clear) {
+            dirty = true;
+        }
+        *p &= ~mask_to_clear;
+        nr -= bits_to_clear;
+        bits_to_clear = BITS_PER_LONG;
+        p++;
+    }
+
+    /* Full words */
+    if (bits_to_clear == BITS_PER_LONG) {
+        while (nr >= BITS_PER_LONG) {
+            if (*p) {
+                dirty = true;
+                *p = 0;
+            }
+            nr -= BITS_PER_LONG;
+            p++;
+        }
+    }
+
+    /* Last word */
+    if (nr) {
+        mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+        if ((*p) & mask_to_clear) {
+            dirty = true;
+        }
+        *p &= ~mask_to_clear;
+    }
+
+    return dirty;
+}
+
 bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
 {
     unsigned long *p = map + BIT_WORD(start);
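Finally, a small test-style sketch of the new helper's semantics, written against the behavior of the function added above: it reports whether anything in the range was set and leaves the range cleared afterwards. It assumes QEMU's bitmap_new()/bitmap_set() helpers and glib are available; it is an illustration, not a test from this commit.

#include "qemu/osdep.h"
#include "qemu/bitmap.h"

static void check_test_and_clear(void)
{
    unsigned long *map = bitmap_new(128);   /* 128 bits, all zero */

    bitmap_set(map, 3, 7);                  /* mark bits 3..9 */

    /* First sweep of word 0 sees the set bits, clears them, returns true. */
    assert(bitmap_test_and_clear(map, 0, 64));

    /* A second sweep over the same range finds nothing left. */
    assert(!bitmap_test_and_clear(map, 0, 64));

    g_free(map);
}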