Migration pull 2021-05-13
Fix of the 2021-05-11 version, with a fix to build on the armhf cross.
The largest change in this set is David's changes for ram block size
changing; then there's a pile of other cleanups and fixes.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEERfXHG0oMt/uXep+pBRYzHrxb/ecFAmCdY4YACgkQBRYzHrxb
/eek0RAAsZhgu4uMQ7dJr9wm6adR0wc8iTc1MVCvHj7WFFsHhS3CLIvJ+JA+B20P
y4pa57Im2FIKG7h1xbVs9NQHZEfM+TBPKcugHipGP/bsEVKjLhHqhrqEPbGZwuCx
24Vx97mmKxVWb4ppv+6HCixlj0zdQwdqgAZY0Dgdfo0mJtBrHQtBXmD2uN3JLnW4
WtsELqr1BORbyKXu+R71EF2qwON5fANO9fxL56Xvg8klwCECi5ntKdoDBq5lS5oc
TWt/M7yvp1kWukclZvbGDFTH9+/ld1W4iE5CaKpX+5Z57Fu8cOzzsNEvJt7G7Yer
ceRe7Yf2X+3EJsmllbjmFfsld7v+KzytURCCTX4HaVIiUQV3ARx95fltsEt4Qhyv
7tGgksC1YFT2GxXxR6vi4eHn8Jpi3yb0vTlWndh5qc5KdBDH8x8Mew+uH72b/0f0
7zg7Qjs5qck5gSBYrLYDLb2BgPq+2vHgt1E6BHADvN5HB9OgV9Ls0sG56pvoRpK/
0s8XuT+R8ETlSbG7jHnXghuQpbupObu5n8AlG1go3wO73GIoJ49nR4Dp0ZyT08bJ
LJopNUnlRdshxxofzQTNgy/8p7k+HiO616bjJ6UuOXTOC10p4kVyIS8hHnYCUyuh
iPlBjDgSk0v1s5o1eCUmEMh3yfNCGvyePEPyJM6XiGPTZb/yYyI=
=RJvy
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20210513a' into staging

Migration pull 2021-05-13

Fix of the 2021-05-11 version, with a fix to build on the armhf cross.
The largest change in this set is David's changes for ram block size
changing; then there's a pile of other cleanups and fixes.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

# gpg: Signature made Thu 13 May 2021 18:36:06 BST
# gpg:                using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A 9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20210513a:
  tests/migration: introduce multifd into guestperf
  tests/qtest/migration-test: Use g_autofree to avoid leaks on error paths
  tests/migration-test: Fix "true" vs true
  migration/ram: Use offset_in_ramblock() in range checks
  migration/multifd: Print used_length of memory block
  migration/ram: Handle RAM block resizes during postcopy
  migration/ram: Simplify host page handling in ram_load_postcopy()
  migration/ram: Discard RAM when growing RAM blocks after ram_postcopy_incoming_init()
  exec: Relax range check in ram_block_discard_range()
  migration/ram: Handle RAM block resizes during precopy
  numa: Make all callbacks of ram block notifiers optional
  numa: Teach ram block notifiers about resizeable ram blocks
  util: vfio-helpers: Factor out and fix processing of existing ram blocks
  migration: Drop redundant query-migrate result @blocked
  migration/ram: Optimize ram_save_host_page()
  migration/ram: Reduce unnecessary rate limiting
  migrate/ram: remove "ram_bulk_stage" and "fpo_enabled"

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 96662996ed
hw/core/numa.c

@@ -802,9 +802,27 @@ void query_numa_node_mem(NumaNodeMem node_mem[], MachineState *ms)
     }
 }
 
+static int ram_block_notify_add_single(RAMBlock *rb, void *opaque)
+{
+    const ram_addr_t max_size = qemu_ram_get_max_length(rb);
+    const ram_addr_t size = qemu_ram_get_used_length(rb);
+    void *host = qemu_ram_get_host_addr(rb);
+    RAMBlockNotifier *notifier = opaque;
+
+    if (host) {
+        notifier->ram_block_added(notifier, host, size, max_size);
+    }
+    return 0;
+}
+
 void ram_block_notifier_add(RAMBlockNotifier *n)
 {
     QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next);
+
+    /* Notify about all existing ram blocks. */
+    if (n->ram_block_added) {
+        qemu_ram_foreach_block(ram_block_notify_add_single, n);
+    }
 }
 
 void ram_block_notifier_remove(RAMBlockNotifier *n)
@@ -812,20 +830,35 @@ void ram_block_notifier_remove(RAMBlockNotifier *n)
     QLIST_REMOVE(n, next);
 }
 
-void ram_block_notify_add(void *host, size_t size)
+void ram_block_notify_add(void *host, size_t size, size_t max_size)
 {
     RAMBlockNotifier *notifier;
 
     QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
-        notifier->ram_block_added(notifier, host, size);
+        if (notifier->ram_block_added) {
+            notifier->ram_block_added(notifier, host, size, max_size);
+        }
     }
 }
 
-void ram_block_notify_remove(void *host, size_t size)
+void ram_block_notify_remove(void *host, size_t size, size_t max_size)
 {
     RAMBlockNotifier *notifier;
 
     QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
-        notifier->ram_block_removed(notifier, host, size);
+        if (notifier->ram_block_removed) {
+            notifier->ram_block_removed(notifier, host, size, max_size);
+        }
     }
 }
+
+void ram_block_notify_resize(void *host, size_t old_size, size_t new_size)
+{
+    RAMBlockNotifier *notifier;
+
+    QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) {
+        if (notifier->ram_block_resized) {
+            notifier->ram_block_resized(notifier, host, old_size, new_size);
+        }
+    }
+}
hw/i386/xen/xen-mapcache.c

@@ -169,7 +169,8 @@ static void xen_remap_bucket(MapCacheEntry *entry,
 
     if (entry->vaddr_base != NULL) {
         if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
-            ram_block_notify_remove(entry->vaddr_base, entry->size);
+            ram_block_notify_remove(entry->vaddr_base, entry->size,
+                                    entry->size);
         }
 
         /*
@@ -224,7 +225,7 @@ static void xen_remap_bucket(MapCacheEntry *entry,
     }
 
     if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
-        ram_block_notify_add(vaddr_base, size);
+        ram_block_notify_add(vaddr_base, size, size);
     }
 
     entry->vaddr_base = vaddr_base;
@@ -465,7 +466,7 @@ static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
     }
 
     pentry->next = entry->next;
-    ram_block_notify_remove(entry->vaddr_base, entry->size);
+    ram_block_notify_remove(entry->vaddr_base, entry->size, entry->size);
     if (munmap(entry->vaddr_base, entry->size) != 0) {
         perror("unmap fails");
         exit(-1);
hw/virtio/virtio-balloon.c

@@ -663,9 +663,6 @@ virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data)
     }
 
     switch (pnd->reason) {
-    case PRECOPY_NOTIFY_SETUP:
-        precopy_enable_free_page_optimization();
-        break;
     case PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC:
         virtio_balloon_free_page_stop(dev);
         break;
@@ -685,6 +682,7 @@ virtio_balloon_free_page_hint_notify(NotifierWithReturn *n, void *data)
          */
         virtio_balloon_free_page_done(dev);
         break;
+    case PRECOPY_NOTIFY_SETUP:
     case PRECOPY_NOTIFY_COMPLETE:
         break;
     default:
hw/virtio/virtio-mem.c

@@ -902,9 +902,6 @@ static int virtio_mem_precopy_notify(NotifierWithReturn *n, void *data)
     PrecopyNotifyData *pnd = data;
 
     switch (pnd->reason) {
-    case PRECOPY_NOTIFY_SETUP:
-        precopy_enable_free_page_optimization();
-        break;
     case PRECOPY_NOTIFY_AFTER_BITMAP_SYNC:
         virtio_mem_precopy_exclude_unplugged(vmem);
         break;
include/exec/cpu-common.h

@@ -57,6 +57,7 @@ const char *qemu_ram_get_idstr(RAMBlock *rb);
 void *qemu_ram_get_host_addr(RAMBlock *rb);
 ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
 ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
+ram_addr_t qemu_ram_get_max_length(RAMBlock *rb);
 bool qemu_ram_is_shared(RAMBlock *rb);
 bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
 void qemu_ram_set_uf_zeroable(RAMBlock *rb);
include/exec/memory.h

@@ -131,7 +131,7 @@ typedef struct IOMMUTLBEvent {
 #define RAM_SHARED     (1 << 1)
 
 /* Only a portion of RAM (used_length) is actually used, and migrated.
- * This used_length size can change across reboots.
+ * Resizing RAM while migrating can result in the migration being canceled.
  */
 #define RAM_RESIZEABLE (1 << 2)
 
@@ -955,7 +955,9 @@ void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
  *                               RAM. Accesses into the region will
  *                               modify memory directly. Only an initial
  *                               portion of this RAM is actually used.
- *                               The used size can change across reboots.
+ *                               Changing the size while migrating
+ *                               can result in the migration being
+ *                               canceled.
  *
  * @mr: the #MemoryRegion to be initialized.
  * @owner: the object that tracks the region's reference count
@@ -1586,8 +1588,8 @@ void *memory_region_get_ram_ptr(MemoryRegion *mr);
 
 /* memory_region_ram_resize: Resize a RAM region.
  *
- * Only legal before guest might have detected the memory size: e.g. on
- * incoming migration, or right after reset.
+ * Resizing RAM while migrating can result in the migration being canceled.
+ * Care has to be taken if the guest might have already detected the memory.
  *
  * @mr: a memory region created with @memory_region_init_resizeable_ram.
  * @newsize: the new size the region
include/exec/ramblock.h

@@ -59,6 +59,16 @@ struct RAMBlock {
      */
     unsigned long *clear_bmap;
     uint8_t clear_bmap_shift;
+
+    /*
+     * RAM block length that corresponds to the used_length on the migration
+     * source (after RAM block sizes were synchronized). Especially, after
+     * starting to run the guest, used_length and postcopy_length can differ.
+     * Used to register/unregister uffd handlers and as the size of the received
+     * bitmap. Receiving any page beyond this length will bail out, as it
+     * could not have been valid on the source.
+     */
+    ram_addr_t postcopy_length;
 };
 #endif
 #endif
include/exec/ramlist.h

@@ -65,15 +65,20 @@ void qemu_mutex_lock_ramlist(void);
 void qemu_mutex_unlock_ramlist(void);
 
 struct RAMBlockNotifier {
-    void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size);
-    void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size);
+    void (*ram_block_added)(RAMBlockNotifier *n, void *host, size_t size,
+                            size_t max_size);
+    void (*ram_block_removed)(RAMBlockNotifier *n, void *host, size_t size,
+                              size_t max_size);
+    void (*ram_block_resized)(RAMBlockNotifier *n, void *host, size_t old_size,
+                              size_t new_size);
     QLIST_ENTRY(RAMBlockNotifier) next;
 };
 
 void ram_block_notifier_add(RAMBlockNotifier *n);
 void ram_block_notifier_remove(RAMBlockNotifier *n);
-void ram_block_notify_add(void *host, size_t size);
-void ram_block_notify_remove(void *host, size_t size);
+void ram_block_notify_add(void *host, size_t size, size_t max_size);
+void ram_block_notify_remove(void *host, size_t size, size_t max_size);
+void ram_block_notify_resize(void *host, size_t old_size, size_t new_size);
 
 void ram_block_dump(Monitor *mon);
 
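For context, a minimal consumer sketch of the interface above. This is illustrative only (a hypothetical logging notifier, not part of the series); it assumes QEMU-internal headers and shows that, with all three callbacks now optional, a notifier interested only in resizes can leave the add/remove hooks NULL:

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "exec/ramlist.h"

/* Hypothetical notifier: reacts only to used_length changes. */
static void resize_logger(RAMBlockNotifier *n, void *host,
                          size_t old_size, size_t new_size)
{
    /* old_size/new_size are the previous and the new used_length. */
    qemu_log("RAM block at %p resized: %zu -> %zu\n",
             host, old_size, new_size);
}

static RAMBlockNotifier resize_notifier = {
    .ram_block_resized = resize_logger,
    /* .ram_block_added/.ram_block_removed intentionally left NULL. */
};

static void resize_logger_init(void)
{
    /* Pre-existing blocks are replayed via ram_block_added (if set). */
    ram_block_notifier_add(&resize_notifier);
}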
migration/ram.h

@@ -37,7 +37,6 @@ void precopy_infrastructure_init(void);
 void precopy_add_notifier(NotifierWithReturn *n);
 void precopy_remove_notifier(NotifierWithReturn *n);
 int precopy_notify(PrecopyNotifyReason reason, Error **errp);
-void precopy_enable_free_page_optimization(void);
 
 void ram_mig_init(void);
 void qemu_guest_free_page_hint(void *addr, size_t len);
migration/migration.c

@@ -223,13 +223,18 @@ void migration_object_init(void)
     dirty_bitmap_mig_init();
 }
 
+void migration_cancel(void)
+{
+    migrate_fd_cancel(current_migration);
+}
+
 void migration_shutdown(void)
 {
     /*
      * Cancel the current migration - that will (eventually)
      * stop the migration using this structure
      */
-    migrate_fd_cancel(current_migration);
+    migration_cancel();
     object_unref(OBJECT(current_migration));
 
     /*
@@ -1073,27 +1078,24 @@ static void populate_vfio_info(MigrationInfo *info)
 static void fill_source_migration_info(MigrationInfo *info)
 {
     MigrationState *s = migrate_get_current();
+    GSList *cur_blocker = migration_blockers;
 
-    info->blocked = migration_is_blocked(NULL);
-    info->has_blocked_reasons = info->blocked;
-    info->blocked_reasons = NULL;
-    if (info->blocked) {
-        GSList *cur_blocker = migration_blockers;
+    info->blocked_reasons = NULL;
 
-        /*
-         * There are two types of reasons a migration might be blocked;
-         * a) devices marked in VMState as non-migratable, and
-         * b) Explicit migration blockers
-         * We need to add both of them here.
-         */
-        qemu_savevm_non_migratable_list(&info->blocked_reasons);
+    /*
+     * There are two types of reasons a migration might be blocked;
+     * a) devices marked in VMState as non-migratable, and
+     * b) Explicit migration blockers
+     * We need to add both of them here.
+     */
+    qemu_savevm_non_migratable_list(&info->blocked_reasons);
 
-        while (cur_blocker) {
-            QAPI_LIST_PREPEND(info->blocked_reasons,
-                              g_strdup(error_get_pretty(cur_blocker->data)));
-            cur_blocker = g_slist_next(cur_blocker);
-        }
+    while (cur_blocker) {
+        QAPI_LIST_PREPEND(info->blocked_reasons,
+                          g_strdup(error_get_pretty(cur_blocker->data)));
+        cur_blocker = g_slist_next(cur_blocker);
     }
+    info->has_blocked_reasons = info->blocked_reasons != NULL;
 
     switch (s->state) {
     case MIGRATION_STATUS_NONE:
@@ -2310,7 +2312,7 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
 
 void qmp_migrate_cancel(Error **errp)
 {
-    migrate_fd_cancel(migrate_get_current());
+    migration_cancel();
 }
 
 void qmp_migrate_continue(MigrationStatus state, Error **errp)
migration/migration.h

@@ -375,5 +375,6 @@ int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque);
 void migration_make_urgent_request(void);
 void migration_consume_urgent_request(void);
 bool migration_rate_limit(void);
+void migration_cancel(void);
 
 #endif
migration/multifd.c

@@ -361,7 +361,7 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
     if (offset > (block->used_length - qemu_target_page_size())) {
         error_setg(errp, "multifd: offset too long %" PRIu64
                    " (max " RAM_ADDR_FMT ")",
-                   offset, block->max_length);
+                   offset, block->used_length);
         return -1;
     }
     p->pages->iov[i].iov_base = block->host + offset;
migration/postcopy-ram.c

@@ -17,6 +17,7 @@
  */
 
 #include "qemu/osdep.h"
+#include "qemu/rcu.h"
 #include "exec/target_page.h"
 #include "migration.h"
 #include "qemu-file.h"
@@ -30,6 +31,7 @@
 #include "qemu/error-report.h"
 #include "trace.h"
 #include "hw/boards.h"
+#include "exec/ramblock.h"
 
 /* Arbitrary limit on size of each discard command,
  * keeps them around ~200 bytes
@@ -452,6 +454,13 @@ static int init_range(RAMBlock *rb, void *opaque)
     ram_addr_t length = qemu_ram_get_used_length(rb);
     trace_postcopy_init_range(block_name, host_addr, offset, length);
 
+    /*
+     * Save the used_length before running the guest. In case we have to
+     * resize RAM blocks when syncing RAM block sizes from the source during
+     * precopy, we'll update it manually via the ram block notifier.
+     */
+    rb->postcopy_length = length;
+
     /*
      * We need the whole of RAM to be truly empty for postcopy, so things
      * like ROMs and any data tables built during init must be zero'd
@@ -474,7 +483,7 @@ static int cleanup_range(RAMBlock *rb, void *opaque)
     const char *block_name = qemu_ram_get_idstr(rb);
     void *host_addr = qemu_ram_get_host_addr(rb);
     ram_addr_t offset = qemu_ram_get_offset(rb);
-    ram_addr_t length = qemu_ram_get_used_length(rb);
+    ram_addr_t length = rb->postcopy_length;
     MigrationIncomingState *mis = opaque;
     struct uffdio_range range_struct;
     trace_postcopy_cleanup_range(block_name, host_addr, offset, length);
@@ -580,7 +589,7 @@ static int nhp_range(RAMBlock *rb, void *opaque)
     const char *block_name = qemu_ram_get_idstr(rb);
     void *host_addr = qemu_ram_get_host_addr(rb);
     ram_addr_t offset = qemu_ram_get_offset(rb);
-    ram_addr_t length = qemu_ram_get_used_length(rb);
+    ram_addr_t length = rb->postcopy_length;
     trace_postcopy_nhp_range(block_name, host_addr, offset, length);
 
     /*
@@ -624,7 +633,7 @@ static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
     struct uffdio_register reg_struct;
 
     reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
-    reg_struct.range.len = qemu_ram_get_used_length(rb);
+    reg_struct.range.len = rb->postcopy_length;
     reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
 
     /* Now tell our userfault_fd that it's responsible for this area */
migration/ram.c
@@ -240,7 +240,7 @@ int64_t ramblock_recv_bitmap_send(QEMUFile *file,
         return -1;
     }
 
-    nbits = block->used_length >> TARGET_PAGE_BITS;
+    nbits = block->postcopy_length >> TARGET_PAGE_BITS;
 
     /*
      * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
@@ -311,10 +311,6 @@ struct RAMState {
     ram_addr_t last_page;
     /* last ram version we have seen */
     uint32_t last_version;
-    /* We are in the first round */
-    bool ram_bulk_stage;
-    /* The free page optimization is enabled */
-    bool fpo_enabled;
     /* How many times we have dirty too many pages */
     int dirty_rate_high_cnt;
     /* these variables are used for bitmap sync */
@@ -330,6 +326,8 @@ struct RAMState {
     uint64_t xbzrle_pages_prev;
     /* Amount of xbzrle encoded bytes since the beginning of the period */
     uint64_t xbzrle_bytes_prev;
+    /* Start using XBZRLE (e.g., after the first round). */
+    bool xbzrle_enabled;
 
     /* compression statistics since the beginning of the period */
     /* amount of count that no free thread to compress data */
@@ -383,15 +381,6 @@ int precopy_notify(PrecopyNotifyReason reason, Error **errp)
     return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
 }
 
-void precopy_enable_free_page_optimization(void)
-{
-    if (!ram_state) {
-        return;
-    }
-
-    ram_state->fpo_enabled = true;
-}
-
 uint64_t ram_bytes_remaining(void)
 {
     return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
@@ -664,7 +653,7 @@ static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
  */
 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
 {
-    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
+    if (!rs->xbzrle_enabled) {
         return;
     }
 
@@ -792,23 +781,12 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
 {
     unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
     unsigned long *bitmap = rb->bmap;
-    unsigned long next;
 
     if (ramblock_is_ignored(rb)) {
         return size;
     }
 
-    /*
-     * When the free page optimization is enabled, we need to check the bitmap
-     * to send the non-free pages rather than all the pages in the bulk stage.
-     */
-    if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
-        next = start + 1;
-    } else {
-        next = find_next_bit(bitmap, size, start);
-    }
-
-    return next;
+    return find_next_bit(bitmap, size, start);
 }
 
 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
@@ -1185,8 +1163,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
     trace_ram_save_page(block->idstr, (uint64_t)offset, p);
 
     XBZRLE_cache_lock();
-    if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
-        migrate_use_xbzrle()) {
+    if (rs->xbzrle_enabled && !migration_in_postcopy()) {
         pages = save_xbzrle_page(rs, &p, current_addr, block,
                                  offset, last_stage);
         if (!last_stage) {
@@ -1365,8 +1342,8 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
         *again = false;
         return false;
     }
-    if ((((ram_addr_t)pss->page) << TARGET_PAGE_BITS)
-        >= pss->block->used_length) {
+    if (!offset_in_ramblock(pss->block,
+                            ((ram_addr_t)pss->page) << TARGET_PAGE_BITS)) {
         /* Didn't find anything in this RAM Block */
         pss->page = 0;
         pss->block = QLIST_NEXT_RCU(pss->block, next);
@@ -1386,7 +1363,10 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
             /* Flag that we've looped */
             pss->complete_round = true;
-            rs->ram_bulk_stage = false;
+            /* After the first round, enable XBZRLE. */
+            if (migrate_use_xbzrle()) {
+                rs->xbzrle_enabled = true;
+            }
         }
         /* Didn't find anything this time, but try again on the new block */
         *again = true;
@@ -1800,14 +1780,6 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
     }
 
     if (block) {
-        /*
-         * As soon as we start servicing pages out of order, then we have
-         * to kill the bulk stage, since the bulk stage assumes
-         * in (migration_bitmap_find_and_reset_dirty) that every page is
-         * dirty, that's no longer true.
-         */
-        rs->ram_bulk_stage = false;
-
         /*
          * We want the background search to continue from the queued page
          * since the guest is likely to want other pages near to the page
@@ -1891,7 +1863,7 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
         rs->last_req_rb = ramblock;
     }
     trace_ram_save_queue_pages(ramblock->idstr, start, len);
-    if (start + len > ramblock->used_length) {
+    if (!offset_in_ramblock(ramblock, start + len - 1)) {
         error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                      RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                      __func__, start, len, ramblock->used_length);
@@ -1920,15 +1892,15 @@ static bool save_page_use_compression(RAMState *rs)
     }
 
     /*
-     * If xbzrle is on, stop using the data compression after first
-     * round of migration even if compression is enabled. In theory,
-     * xbzrle can do better than compression.
+     * If xbzrle is enabled (e.g., after first round of migration), stop
+     * using the data compression. In theory, xbzrle can do better than
+     * compression.
      */
-    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
-        return true;
+    if (rs->xbzrle_enabled) {
+        return false;
     }
 
-    return false;
+    return true;
 }
 
 /*
@@ -2041,6 +2013,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
     int tmppages, pages = 0;
     size_t pagesize_bits =
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
+    unsigned long hostpage_boundary =
+        QEMU_ALIGN_UP(pss->page + 1, pagesize_bits);
     unsigned long start_page = pss->page;
     int res;
 
@@ -2051,25 +2025,27 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
 
     do {
         /* Check the pages is dirty and if it is send it */
-        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
-            pss->page++;
-            continue;
-        }
+        if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
+            tmppages = ram_save_target_page(rs, pss, last_stage);
+            if (tmppages < 0) {
+                return tmppages;
+            }
 
-        tmppages = ram_save_target_page(rs, pss, last_stage);
-        if (tmppages < 0) {
-            return tmppages;
+            pages += tmppages;
+            /*
+             * Allow rate limiting to happen in the middle of huge pages if
+             * something is sent in the current iteration.
+             */
+            if (pagesize_bits > 1 && tmppages > 0) {
+                migration_rate_limit();
+            }
         }
-
-        pages += tmppages;
-        pss->page++;
-        /* Allow rate limiting to happen in the middle of huge pages */
-        migration_rate_limit();
-    } while ((pss->page & (pagesize_bits - 1)) &&
+        pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
+    } while ((pss->page < hostpage_boundary) &&
              offset_in_ramblock(pss->block,
                                 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS));
-    /* The offset we leave with is the last one we looked at */
-    pss->page--;
+    /* The offset we leave with is the min boundary of host page and block */
+    pss->page = MIN(pss->page, hostpage_boundary) - 1;
 
     res = ram_save_release_protection(rs, pss, start_page);
     return (res < 0 ? res : pages);
@@ -2235,8 +2211,7 @@ static void ram_state_reset(RAMState *rs)
     rs->last_sent_block = NULL;
     rs->last_page = 0;
     rs->last_version = ram_list.version;
-    rs->ram_bulk_stage = true;
-    rs->fpo_enabled = false;
+    rs->xbzrle_enabled = false;
 }
 
 #define MAX_WAIT 50 /* ms, half buffered_file limit */
@@ -2720,15 +2695,7 @@ static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
     /* This may not be aligned with current bitmaps. Recalculate. */
     rs->migration_dirty_pages = pages;
 
-    rs->last_seen_block = NULL;
-    rs->last_sent_block = NULL;
-    rs->last_page = 0;
-    rs->last_version = ram_list.version;
-    /*
-     * Disable the bulk stage, otherwise we'll resend the whole RAM no
-     * matter what we have sent.
-     */
-    rs->ram_bulk_stage = false;
+    ram_state_reset(rs);
 
     /* Update RAMState cache of output QEMUFile */
     rs->f = out;
@@ -3118,6 +3085,20 @@ static inline void *host_from_ram_block_offset(RAMBlock *block,
     return block->host + offset;
 }
 
+static void *host_page_from_ram_block_offset(RAMBlock *block,
+                                             ram_addr_t offset)
+{
+    /* Note: Explicitly no check against offset_in_ramblock(). */
+    return (void *)QEMU_ALIGN_DOWN((uintptr_t)(block->host + offset),
+                                   block->page_size);
+}
+
+static ram_addr_t host_page_offset_from_ram_block_offset(RAMBlock *block,
+                                                         ram_addr_t offset)
+{
+    return ((uintptr_t)block->host + offset) & (block->page_size - 1);
+}
+
 static inline void *colo_cache_from_block_offset(RAMBlock *block,
                                                  ram_addr_t offset, bool record_bitmap)
 {
@@ -3345,16 +3326,9 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     }
 }
 
-/*
- * we must set ram_bulk_stage to false, otherwise in
- * migation_bitmap_find_dirty the bitmap will be unused and
- * all the pages in ram cache wil be flushed to the ram of
- * secondary VM.
- */
 static void colo_init_ram_state(void)
 {
     ram_state_init(&ram_state);
-    ram_state->ram_bulk_stage = false;
 }
 
 /*
@@ -3521,13 +3495,12 @@ static int ram_load_postcopy(QEMUFile *f)
     MigrationIncomingState *mis = migration_incoming_get_current();
     /* Temporary page that is later 'placed' */
     void *postcopy_host_page = mis->postcopy_tmp_page;
-    void *this_host = NULL;
+    void *host_page = NULL;
     bool all_zero = true;
     int target_pages = 0;
 
     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
         ram_addr_t addr;
-        void *host = NULL;
         void *page_buffer = NULL;
         void *place_source = NULL;
         RAMBlock *block = NULL;
@@ -3552,9 +3525,18 @@ static int ram_load_postcopy(QEMUFile *f)
         if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                      RAM_SAVE_FLAG_COMPRESS_PAGE)) {
             block = ram_block_from_stream(f, flags);
+            if (!block) {
+                ret = -EINVAL;
+                break;
+            }
 
-            host = host_from_ram_block_offset(block, addr);
-            if (!host) {
+            /*
+             * Relying on used_length is racy and can result in false positives.
+             * We might place pages beyond used_length in case RAM was shrunk
+             * while in postcopy, which is fine - trying to place via
+             * UFFDIO_COPY/UFFDIO_ZEROPAGE will never segfault.
+             */
+            if (!block->host || addr >= block->postcopy_length) {
                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                 ret = -EINVAL;
                 break;
@@ -3572,19 +3554,17 @@ static int ram_load_postcopy(QEMUFile *f)
              * of a host page in one chunk.
              */
             page_buffer = postcopy_host_page +
-                          ((uintptr_t)host & (block->page_size - 1));
+                          host_page_offset_from_ram_block_offset(block, addr);
             /* If all TP are zero then we can optimise the place */
             if (target_pages == 1) {
-                this_host = (void *)QEMU_ALIGN_DOWN((uintptr_t)host,
-                                                    block->page_size);
-            } else {
+                host_page = host_page_from_ram_block_offset(block, addr);
+            } else if (host_page != host_page_from_ram_block_offset(block,
+                                                                    addr)) {
                 /* not the 1st TP within the HP */
-                if (QEMU_ALIGN_DOWN((uintptr_t)host, block->page_size) !=
-                    (uintptr_t)this_host) {
-                    error_report("Non-same host page %p/%p",
-                                 host, this_host);
-                    ret = -EINVAL;
-                    break;
-                }
+                error_report("Non-same host page %p/%p", host_page,
+                             host_page_from_ram_block_offset(block, addr));
+                ret = -EINVAL;
+                break;
             }
 
             /*
@@ -3663,16 +3643,11 @@ static int ram_load_postcopy(QEMUFile *f)
         }
 
         if (!ret && place_needed) {
-            /* This gets called at the last target page in the host page */
-            void *place_dest = (void *)QEMU_ALIGN_DOWN((uintptr_t)host,
-                                                       block->page_size);
-
             if (all_zero) {
-                ret = postcopy_place_page_zero(mis, place_dest,
-                                               block);
+                ret = postcopy_place_page_zero(mis, host_page, block);
             } else {
-                ret = postcopy_place_page(mis, place_dest,
-                                          place_source, block);
+                ret = postcopy_place_page(mis, host_page, place_source,
+                                          block);
             }
             place_needed = false;
             target_pages = 0;
@@ -3721,8 +3696,8 @@ void colo_flush_ram_cache(void)
     while (block) {
         offset = migration_bitmap_find_dirty(ram_state, block, offset);
 
-        if (((ram_addr_t)offset) << TARGET_PAGE_BITS
-            >= block->used_length) {
+        if (!offset_in_ramblock(block,
+                                ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
             offset = 0;
             block = QLIST_NEXT_RCU(block, next);
         } else {
@@ -4136,8 +4111,69 @@ static SaveVMHandlers savevm_ram_handlers = {
     .resume_prepare = ram_resume_prepare,
 };
 
+static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host,
+                                      size_t old_size, size_t new_size)
+{
+    PostcopyState ps = postcopy_state_get();
+    ram_addr_t offset;
+    RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset);
+    Error *err = NULL;
+
+    if (ramblock_is_ignored(rb)) {
+        return;
+    }
+
+    if (!migration_is_idle()) {
+        /*
+         * Precopy code on the source cannot deal with the size of RAM blocks
+         * changing at random points in time - especially after sending the
+         * RAM block sizes in the migration stream, they must no longer change.
+         * Abort and indicate a proper reason.
+         */
+        error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr);
+        migrate_set_error(migrate_get_current(), err);
+        error_free(err);
+        migration_cancel();
+    }
+
+    switch (ps) {
+    case POSTCOPY_INCOMING_ADVISE:
+        /*
+         * Update what ram_postcopy_incoming_init()->init_range() does at the
+         * time postcopy was advised. Syncing RAM blocks with the source will
+         * result in RAM resizes.
+         */
+        if (old_size < new_size) {
+            if (ram_discard_range(rb->idstr, old_size, new_size - old_size)) {
+                error_report("RAM block '%s' discard of resized RAM failed",
+                             rb->idstr);
+            }
+        }
+        rb->postcopy_length = new_size;
+        break;
+    case POSTCOPY_INCOMING_NONE:
+    case POSTCOPY_INCOMING_RUNNING:
+    case POSTCOPY_INCOMING_END:
+        /*
+         * Once our guest is running, postcopy does no longer care about
+         * resizes. When growing, the new memory was not available on the
+         * source, no handler needed.
+         */
+        break;
+    default:
+        error_report("RAM block '%s' resized during postcopy state: %d",
+                     rb->idstr, ps);
+        exit(-1);
+    }
+}
+
+static RAMBlockNotifier ram_mig_ram_notifier = {
+    .ram_block_resized = ram_mig_ram_block_resized,
+};
+
 void ram_mig_init(void)
 {
     qemu_mutex_init(&XBZRLE.lock);
     register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
+    ram_block_notifier_add(&ram_mig_ram_notifier);
 }
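Several hunks above replace open-coded comparisons against used_length with offset_in_ramblock(). As a paraphrase (the real helper is a one-liner in QEMU's headers; this sketch only restates its semantics), an offset lies inside a RAM block iff it is below the block's used_length, so an overrun test such as start + len > used_length becomes !offset_in_ramblock(rb, start + len - 1):

/* Sketch of the helper's semantics, not the verbatim QEMU definition. */
static inline bool offset_in_ramblock_sketch(RAMBlock *b, ram_addr_t offset)
{
    return offset < b->used_length;
}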
monitor/hmp-cmds.c

@@ -224,7 +224,7 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict)
 
     migration_global_dump(mon);
 
-    if (info->blocked) {
+    if (info->blocked_reasons) {
         strList *reasons = info->blocked_reasons;
         monitor_printf(mon, "Outgoing migration blocked:\n");
         while (reasons) {
qapi/migration.json

@@ -228,11 +228,6 @@
 #                    Present and non-empty when migration is blocked.
 #                    (since 6.0)
 #
-# @blocked: True if outgoing migration is blocked (since 6.0)
-#
-# Features:
-# @deprecated: Member @blocked is deprecated. Use @blocked-reasons instead.
-#
 # Since: 0.14
 ##
 { 'struct': 'MigrationInfo',
@@ -246,7 +241,6 @@
            '*setup-time': 'int',
            '*cpu-throttle-percentage': 'int',
            '*error-desc': 'str',
-           'blocked': { 'type': 'bool', 'features': [ 'deprecated' ] },
            '*blocked-reasons': ['str'],
            '*postcopy-blocktime' : 'uint32',
            '*postcopy-vcpu-blocktime': ['uint32'],
softmmu/physmem.c

@@ -1694,6 +1694,11 @@ ram_addr_t qemu_ram_get_used_length(RAMBlock *rb)
     return rb->used_length;
 }
 
+ram_addr_t qemu_ram_get_max_length(RAMBlock *rb)
+{
+    return rb->max_length;
+}
+
 bool qemu_ram_is_shared(RAMBlock *rb)
 {
     return rb->flags & RAM_SHARED;
@@ -1793,8 +1798,9 @@ static int memory_try_enable_merging(void *addr, size_t len)
     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
 }
 
-/* Only legal before guest might have detected the memory size: e.g. on
- * incoming migration, or right after reset.
+/*
+ * Resizing RAM while migrating can result in the migration being canceled.
+ * Care has to be taken if the guest might have already detected the memory.
  *
  * As memory core doesn't know how is memory accessed, it is up to
  * resize callback to update device state and/or add assertions to detect
@@ -1802,6 +1808,7 @@ static int memory_try_enable_merging(void *addr, size_t len)
  */
 int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
 {
+    const ram_addr_t oldsize = block->used_length;
     const ram_addr_t unaligned_size = newsize;
 
     assert(block);
@@ -1838,6 +1845,11 @@ int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
         return -EINVAL;
     }
 
+    /* Notify before modifying the ram block and touching the bitmaps. */
+    if (block->host) {
+        ram_block_notify_resize(block->host, oldsize, newsize);
+    }
+
     cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
     block->used_length = newsize;
     cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
@@ -2005,7 +2017,8 @@ static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
             qemu_madvise(new_block->host, new_block->max_length,
                          QEMU_MADV_DONTFORK);
         }
-        ram_block_notify_add(new_block->host, new_block->max_length);
+        ram_block_notify_add(new_block->host, new_block->used_length,
+                             new_block->max_length);
     }
 }
 
@@ -2184,7 +2197,8 @@ void qemu_ram_free(RAMBlock *block)
     }
 
     if (block->host) {
-        ram_block_notify_remove(block->host, block->max_length);
+        ram_block_notify_remove(block->host, block->used_length,
+                                block->max_length);
    }
 
     qemu_mutex_lock_ramlist();
@@ -3486,7 +3500,7 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
         goto err;
     }
 
-    if ((start + length) <= rb->used_length) {
+    if ((start + length) <= rb->max_length) {
         bool need_madvise, need_fallocate;
         if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
             error_report("ram_block_discard_range: Unaligned length: %zx",
@@ -3553,7 +3567,7 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
     } else {
         error_report("ram_block_discard_range: Overrun block '%s' (%" PRIu64
                      "/%zx/" RAM_ADDR_FMT")",
-                     rb->idstr, start, length, rb->used_length);
+                     rb->idstr, start, length, rb->max_length);
     }
 
 err:
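Passing max_size to the notifiers is what lets backends that pin or register guest memory up front (HAX, SEV, and VFIO below) stay resize-agnostic: by acting on max_size once in ram_block_added, a later ram_block_resized event needs no handling, since the registered region already covers every possible used_length. A condensed sketch of that contract (hypothetical consumer; register_with_backend() is a stand-in, not a QEMU API):

static void my_ram_block_added(RAMBlockNotifier *n, void *host,
                               size_t size, size_t max_size)
{
    /*
     * Register the maximum possible size up front; resizes within
     * [0, max_size] then require no further action from this notifier.
     */
    register_with_backend(host, max_size); /* hypothetical backend call */
}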
target/i386/hax/hax-mem.c

@@ -293,7 +293,8 @@ static MemoryListener hax_memory_listener = {
     .priority = 10,
 };
 
-static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
+static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
+                                size_t max_size)
 {
     /*
      * We must register each RAM block with the HAXM kernel module, or
@@ -304,7 +305,7 @@ static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
      * host physical pages for the RAM block as part of this registration
      * process, hence the name hax_populate_ram().
      */
-    if (hax_populate_ram((uint64_t)(uintptr_t)host, size) < 0) {
+    if (hax_populate_ram((uint64_t)(uintptr_t)host, max_size) < 0) {
         fprintf(stderr, "HAX failed to populate RAM\n");
         abort();
     }
target/i386/sev.c

@@ -180,7 +180,8 @@ sev_set_guest_state(SevGuestState *sev, SevState new_state)
 }
 
 static void
-sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
+sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size,
+                    size_t max_size)
 {
     int r;
     struct kvm_enc_region range;
@@ -197,19 +198,20 @@ sev_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
     }
 
     range.addr = (__u64)(unsigned long)host;
-    range.size = size;
+    range.size = max_size;
 
-    trace_kvm_memcrypt_register_region(host, size);
+    trace_kvm_memcrypt_register_region(host, max_size);
     r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_REG_REGION, &range);
     if (r) {
         error_report("%s: failed to register region (%p+%#zx) error '%s'",
-                     __func__, host, size, strerror(errno));
+                     __func__, host, max_size, strerror(errno));
         exit(1);
     }
 }
 
 static void
-sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size)
+sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size,
+                      size_t max_size)
 {
     int r;
     struct kvm_enc_region range;
@@ -226,13 +228,13 @@ sev_ram_block_removed(RAMBlockNotifier *n, void *host, size_t size)
     }
 
     range.addr = (__u64)(unsigned long)host;
-    range.size = size;
+    range.size = max_size;
 
-    trace_kvm_memcrypt_unregister_region(host, size);
+    trace_kvm_memcrypt_unregister_region(host, max_size);
     r = kvm_vm_ioctl(kvm_state, KVM_MEMORY_ENCRYPT_UNREG_REGION, &range);
     if (r) {
         error_report("%s: failed to unregister region (%p+%#zx)",
-                     __func__, host, size);
+                     __func__, host, max_size);
     }
 }
 
tests/migration/guestperf/comparison.py

@@ -121,4 +121,18 @@ COMPARISONS = [
         Scenario("compr-xbzrle-cache-50",
                  compression_xbzrle=True, compression_xbzrle_cache=50),
     ]),
 
+
+    # Looking at effect of multifd with
+    # varying numbers of channels
+    Comparison("compr-multifd", scenarios = [
+        Scenario("compr-multifd-channels-4",
+                 multifd=True, multifd_channels=2),
+        Scenario("compr-multifd-channels-8",
+                 multifd=True, multifd_channels=8),
+        Scenario("compr-multifd-channels-32",
+                 multifd=True, multifd_channels=32),
+        Scenario("compr-multifd-channels-64",
+                 multifd=True, multifd_channels=64),
+    ]),
 ]
tests/migration/guestperf/engine.py

@@ -188,6 +188,22 @@ class Engine(object):
                                1024 * 1024 * 1024 / 100 *
                                scenario._compression_xbzrle_cache))
 
+        if scenario._multifd:
+            resp = src.command("migrate-set-capabilities",
+                               capabilities = [
+                                   { "capability": "multifd",
+                                     "state": True }
+                               ])
+            resp = src.command("migrate-set-parameters",
+                               multifd_channels=scenario._multifd_channels)
+            resp = dst.command("migrate-set-capabilities",
+                               capabilities = [
+                                   { "capability": "multifd",
+                                     "state": True }
+                               ])
+            resp = dst.command("migrate-set-parameters",
+                               multifd_channels=scenario._multifd_channels)
+
         resp = src.command("migrate", uri=connect_uri)
 
         post_copy = False
tests/migration/guestperf/scenario.py

@@ -29,7 +29,8 @@ class Scenario(object):
                  post_copy=False, post_copy_iters=5,
                  auto_converge=False, auto_converge_step=10,
                  compression_mt=False, compression_mt_threads=1,
-                 compression_xbzrle=False, compression_xbzrle_cache=10):
+                 compression_xbzrle=False, compression_xbzrle_cache=10,
+                 multifd=False, multifd_channels=2):
 
         self._name = name
 
@@ -56,6 +57,9 @@ class Scenario(object):
         self._compression_xbzrle = compression_xbzrle
         self._compression_xbzrle_cache = compression_xbzrle_cache # percentage of guest RAM
 
+        self._multifd = multifd
+        self._multifd_channels = multifd_channels
+
     def serialize(self):
         return {
             "name": self._name,
@@ -73,6 +77,8 @@ class Scenario(object):
             "compression_mt_threads": self._compression_mt_threads,
             "compression_xbzrle": self._compression_xbzrle,
             "compression_xbzrle_cache": self._compression_xbzrle_cache,
+            "multifd": self._multifd,
+            "multifd_channels": self._multifd_channels,
         }
 
     @classmethod
@@ -92,4 +98,6 @@ class Scenario(object):
                    data["compression_mt"],
                    data["compression_mt_threads"],
                    data["compression_xbzrle"],
-                   data["compression_xbzrle_cache"])
+                   data["compression_xbzrle_cache"],
+                   data["multifd"],
+                   data["multifd_channels"])
tests/migration/guestperf/shell.py

@@ -122,6 +122,11 @@ class Shell(BaseShell):
         parser.add_argument("--compression-xbzrle", dest="compression_xbzrle", default=False, action="store_true")
         parser.add_argument("--compression-xbzrle-cache", dest="compression_xbzrle_cache", default=10, type=int)
 
+        parser.add_argument("--multifd", dest="multifd", default=False,
+                            action="store_true")
+        parser.add_argument("--multifd-channels", dest="multifd_channels",
+                            default=2, type=int)
+
     def get_scenario(self, args):
         return Scenario(name="perfreport",
                         downtime=args.downtime,
@@ -142,7 +147,10 @@ class Shell(BaseShell):
                         compression_mt_threads=args.compression_mt_threads,
 
                         compression_xbzrle=args.compression_xbzrle,
-                        compression_xbzrle_cache=args.compression_xbzrle_cache)
+                        compression_xbzrle_cache=args.compression_xbzrle_cache,
+
+                        multifd=args.multifd,
+                        multifd_channels=args.multifd_channels)
 
     def run(self, argv):
         args = self._parser.parse_args(argv)
tests/qtest/migration-test.c

@@ -110,13 +110,12 @@ static void init_bootfile(const char *bootpath, void *content, size_t len)
  */
 static void wait_for_serial(const char *side)
 {
-    char *serialpath = g_strdup_printf("%s/%s", tmpfs, side);
+    g_autofree char *serialpath = g_strdup_printf("%s/%s", tmpfs, side);
     FILE *serialfile = fopen(serialpath, "r");
     const char *arch = qtest_get_arch();
     int started = (strcmp(side, "src_serial") == 0 &&
                    strcmp(arch, "ppc64") == 0) ? 0 : 1;
 
-    g_free(serialpath);
     do {
         int readvalue = fgetc(serialfile);
 
@@ -274,10 +273,9 @@ static void check_guests_ram(QTestState *who)
 
 static void cleanup(const char *filename)
 {
-    char *path = g_strdup_printf("%s/%s", tmpfs, filename);
+    g_autofree char *path = g_strdup_printf("%s/%s", tmpfs, filename);
 
     unlink(path);
-    g_free(path);
 }
 
 static char *SocketAddress_to_str(SocketAddress *addr)
@@ -374,11 +372,8 @@ static char *migrate_get_parameter_str(QTestState *who,
 static void migrate_check_parameter_str(QTestState *who, const char *parameter,
                                         const char *value)
 {
-    char *result;
-
-    result = migrate_get_parameter_str(who, parameter);
+    g_autofree char *result = migrate_get_parameter_str(who, parameter);
     g_assert_cmpstr(result, ==, value);
-    g_free(result);
 }
 
 static void migrate_set_parameter_str(QTestState *who, const char *parameter,
@@ -495,12 +490,14 @@ static void migrate_start_destroy(MigrateStart *args)
 static int test_migrate_start(QTestState **from, QTestState **to,
                               const char *uri, MigrateStart *args)
 {
-    gchar *arch_source, *arch_target;
-    gchar *cmd_source, *cmd_target;
+    g_autofree gchar *arch_source = NULL;
+    g_autofree gchar *arch_target = NULL;
+    g_autofree gchar *cmd_source = NULL;
+    g_autofree gchar *cmd_target = NULL;
     const gchar *ignore_stderr;
-    char *bootpath = NULL;
-    char *shmem_opts;
-    char *shmem_path;
+    g_autofree char *bootpath = NULL;
+    g_autofree char *shmem_opts = NULL;
+    g_autofree char *shmem_path = NULL;
     const char *arch = qtest_get_arch();
     const char *machine_opts = NULL;
     const char *memory_size;
@@ -559,8 +556,6 @@ static int test_migrate_start(QTestState **from, QTestState **to,
         g_assert_not_reached();
     }
 
-    g_free(bootpath);
-
     if (!getenv("QTEST_LOG") && args->hide_stderr) {
         ignore_stderr = "2>/dev/null";
     } else {
@@ -588,11 +583,9 @@ static int test_migrate_start(QTestState **from, QTestState **to,
                                  memory_size, tmpfs,
                                  arch_source, shmem_opts, args->opts_source,
                                  ignore_stderr);
-    g_free(arch_source);
     if (!args->only_target) {
         *from = qtest_init(cmd_source);
     }
-    g_free(cmd_source);
 
     cmd_target = g_strdup_printf("-accel kvm -accel tcg%s%s "
                                  "-name target,debug-threads=on "
@@ -605,18 +598,14 @@ static int test_migrate_start(QTestState **from, QTestState **to,
                                  memory_size, tmpfs, uri,
                                  arch_target, shmem_opts,
                                  args->opts_target, ignore_stderr);
-    g_free(arch_target);
     *to = qtest_init(cmd_target);
-    g_free(cmd_target);
 
-    g_free(shmem_opts);
     /*
      * Remove shmem file immediately to avoid memory leak in test failed case.
      * It's valid becase QEMU has already opened this file
      */
     if (args->use_shmem) {
         unlink(shmem_path);
-        g_free(shmem_path);
     }
 
 out:
@@ -662,7 +651,7 @@ static int migrate_postcopy_prepare(QTestState **from_ptr,
                                     QTestState **to_ptr,
                                     MigrateStart *args)
 {
-    char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+    g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
     QTestState *from, *to;
 
     if (test_migrate_start(&from, &to, uri, args)) {
@@ -684,7 +673,6 @@ static int migrate_postcopy_prepare(QTestState **from_ptr,
     wait_for_serial("src_serial");
 
     migrate_qmp(from, uri, "{}");
-    g_free(uri);
 
     wait_for_migration_pass(from);
 
@@ -724,7 +712,7 @@ static void test_postcopy_recovery(void)
 {
     MigrateStart *args = migrate_start_new();
     QTestState *from, *to;
-    char *uri;
+    g_autofree char *uri = NULL;
 
     args->hide_stderr = true;
 
@@ -775,7 +763,6 @@ static void test_postcopy_recovery(void)
                              (const char * []) { "failed", "active",
                                                  "completed", NULL });
     migrate_qmp(from, uri, "{'resume': true}");
-    g_free(uri);
 
     /* Restore the postcopy bandwidth to unlimited */
     migrate_set_parameter_int(from, "max-postcopy-bandwidth", 0);
@@ -800,7 +787,7 @@ static void test_baddest(void)
 
 static void test_precopy_unix(void)
 {
-    char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+    g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
     MigrateStart *args = migrate_start_new();
     QTestState *from, *to;
 
@@ -836,14 +823,13 @@ static void test_precopy_unix(void)
     wait_for_migration_complete(from);
 
     test_migrate_end(from, to, true);
-    g_free(uri);
 }
 
 #if 0
 /* Currently upset on aarch64 TCG */
 static void test_ignore_shared(void)
 {
-    char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+    g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
     QTestState *from, *to;
 
     if (test_migrate_start(&from, &to, uri, false, true, NULL, NULL)) {
@@ -873,7 +859,6 @@ static void test_ignore_shared(void)
     g_assert_cmpint(read_ram_property_int(from, "transferred"), <, 1024 * 1024);
 
     test_migrate_end(from, to, true);
-    g_free(uri);
 }
 #endif
 
@@ -898,8 +883,8 @@ static void test_xbzrle(const char *uri)
 
     migrate_set_parameter_int(from, "xbzrle-cache-size", 33554432);
 
-    migrate_set_capability(from, "xbzrle", "true");
-    migrate_set_capability(to, "xbzrle", "true");
+    migrate_set_capability(from, "xbzrle", true);
+    migrate_set_capability(to, "xbzrle", true);
 
     /* Wait for the first serial output from the source */
     wait_for_serial("src_serial");
 
@@ -925,16 +910,15 @@ static void test_xbzrle(const char *uri)
 
 static void test_xbzrle_unix(void)
 {
-    char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+    g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
 
     test_xbzrle(uri);
-    g_free(uri);
 }
 
 static void test_precopy_tcp(void)
 {
     MigrateStart *args = migrate_start_new();
-    char *uri;
+    g_autofree char *uri = NULL;
     QTestState *from, *to;
 
     if (test_migrate_start(&from, &to, "tcp:127.0.0.1:0", args)) {
@@ -971,7 +955,6 @@ static void test_precopy_tcp(void)
     wait_for_migration_complete(from);
 
     test_migrate_end(from, to, true);
-    g_free(uri);
 }
 
 static void test_migrate_fd_proto(void)
@@ -1060,7 +1043,7 @@ static void test_migrate_fd_proto(void)
 
 static void do_test_validate_uuid(MigrateStart *args, bool should_fail)
 {
-    char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+    g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
     QTestState *from, *to;
 
     if (test_migrate_start(&from, &to, uri, args)) {
@@ -1088,7 +1071,6 @@ static void do_test_validate_uuid(MigrateStart *args, bool should_fail)
     }
 
     test_migrate_end(from, to, false);
-    g_free(uri);
 }
 
 static void test_validate_uuid(void)
@@ -1136,7 +1118,7 @@ static void test_validate_uuid_dst_not_set(void)
 
 static void test_migrate_auto_converge(void)
 {
-    char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+    g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
     MigrateStart *args = migrate_start_new();
     QTestState *from, *to;
     int64_t remaining, percentage;
@@ -1214,7 +1196,6 @@ static void test_migrate_auto_converge(void)
     wait_for_serial("dest_serial");
     wait_for_migration_complete(from);
 
-    g_free(uri);
-
     test_migrate_end(from, to, true);
 }
@@ -1224,7 +1205,7 @@ static void test_multifd_tcp(const char *method)
     MigrateStart *args = migrate_start_new();
     QTestState *from, *to;
     QDict *rsp;
-    char *uri;
+    g_autofree char *uri = NULL;
 
     if (test_migrate_start(&from, &to, "defer", args)) {
         return;
@@ -1246,8 +1227,8 @@ static void test_multifd_tcp(const char *method)
     migrate_set_parameter_str(from, "multifd-compression", method);
     migrate_set_parameter_str(to, "multifd-compression", method);
 
-    migrate_set_capability(from, "multifd", "true");
-    migrate_set_capability(to, "multifd", "true");
+    migrate_set_capability(from, "multifd", true);
+    migrate_set_capability(to, "multifd", true);
 
     /* Start incoming migration from the 1st socket */
     rsp = wait_command(to, "{ 'execute': 'migrate-incoming',"
@@ -1273,7 +1254,6 @@ static void test_multifd_tcp(const char *method)
     wait_for_serial("dest_serial");
     wait_for_migration_complete(from);
     test_migrate_end(from, to, true);
-    g_free(uri);
 }
 
 static void test_multifd_tcp_none(void)
@@ -1309,7 +1289,7 @@ static void test_multifd_tcp_cancel(void)
     MigrateStart *args = migrate_start_new();
     QTestState *from, *to, *to2;
     QDict *rsp;
-    char *uri;
+    g_autofree char *uri = NULL;
 
     args->hide_stderr = true;
 
@@ -1330,8 +1310,8 @@ static void test_multifd_tcp_cancel(void)
     migrate_set_parameter_int(from, "multifd-channels", 16);
     migrate_set_parameter_int(to, "multifd-channels", 16);
 
-    migrate_set_capability(from, "multifd", "true");
-    migrate_set_capability(to, "multifd", "true");
+    migrate_set_capability(from, "multifd", true);
+    migrate_set_capability(to, "multifd", true);
 
     /* Start incoming migration from the 1st socket */
     rsp = wait_command(to, "{ 'execute': 'migrate-incoming',"
@@ -1358,7 +1338,7 @@ static void test_multifd_tcp_cancel(void)
 
     migrate_set_parameter_int(to2, "multifd-channels", 16);
 
-    migrate_set_capability(to2, "multifd", "true");
+    migrate_set_capability(to2, "multifd", true);
 
     /* Start incoming migration from the 1st socket */
     rsp = wait_command(to2, "{ 'execute': 'migrate-incoming',"
@@ -1387,7 +1367,6 @@ static void test_multifd_tcp_cancel(void)
     wait_for_serial("dest_serial");
     wait_for_migration_complete(from);
     test_migrate_end(from, to2, true);
-    g_free(uri);
 }
 
 int main(int argc, char **argv)
util/vfio-helpers.c

@@ -459,51 +459,38 @@ fail_container:
     return ret;
 }
 
-static void qemu_vfio_ram_block_added(RAMBlockNotifier *n,
-                                      void *host, size_t size)
+static void qemu_vfio_ram_block_added(RAMBlockNotifier *n, void *host,
+                                      size_t size, size_t max_size)
 {
     QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
-    trace_qemu_vfio_ram_block_added(s, host, size);
-    qemu_vfio_dma_map(s, host, size, false, NULL);
+    int ret;
+
+    trace_qemu_vfio_ram_block_added(s, host, max_size);
+    ret = qemu_vfio_dma_map(s, host, max_size, false, NULL);
+    if (ret) {
+        error_report("qemu_vfio_dma_map(%p, %zu) failed: %s", host, max_size,
+                     strerror(-ret));
+    }
 }
 
-static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n,
-                                        void *host, size_t size)
+static void qemu_vfio_ram_block_removed(RAMBlockNotifier *n, void *host,
+                                        size_t size, size_t max_size)
 {
     QEMUVFIOState *s = container_of(n, QEMUVFIOState, ram_notifier);
     if (host) {
-        trace_qemu_vfio_ram_block_removed(s, host, size);
+        trace_qemu_vfio_ram_block_removed(s, host, max_size);
         qemu_vfio_dma_unmap(s, host);
     }
 }
 
-static int qemu_vfio_init_ramblock(RAMBlock *rb, void *opaque)
-{
-    void *host_addr = qemu_ram_get_host_addr(rb);
-    ram_addr_t length = qemu_ram_get_used_length(rb);
-    int ret;
-    QEMUVFIOState *s = opaque;
-
-    if (!host_addr) {
-        return 0;
-    }
-    ret = qemu_vfio_dma_map(s, host_addr, length, false, NULL);
-    if (ret) {
-        fprintf(stderr, "qemu_vfio_init_ramblock: failed %p %" PRId64 "\n",
-                host_addr, (uint64_t)length);
-    }
-    return 0;
-}
-
 static void qemu_vfio_open_common(QEMUVFIOState *s)
 {
     qemu_mutex_init(&s->lock);
     s->ram_notifier.ram_block_added = qemu_vfio_ram_block_added;
     s->ram_notifier.ram_block_removed = qemu_vfio_ram_block_removed;
-    ram_block_notifier_add(&s->ram_notifier);
     s->low_water_mark = QEMU_VFIO_IOVA_MIN;
     s->high_water_mark = QEMU_VFIO_IOVA_MAX;
-    qemu_ram_foreach_block(qemu_vfio_init_ramblock, s);
+    ram_block_notifier_add(&s->ram_notifier);
 }
 
 /**