mirror of
https://github.com/xemu-project/xemu.git
synced 2024-11-27 13:30:52 +00:00
ppc: Huge page detection mechanism fixes - Episode III
After already fixing two issues with the huge page detection mechanism (see commits 159d2e39a8 and 86b50f2e1b), Greg Kurz noticed another case that caused the guest to crash where QEMU announces huge pages though they should not be available for the guest:

qemu-system-ppc64 -enable-kvm ... -mem-path /dev/hugepages \
 -m 1G,slots=4,maxmem=32G \
 -object memory-backend-ram,policy=default,size=1G,id=mem-mem1 \
 -device pc-dimm,id=dimm-mem1,memdev=mem-mem1 -smp 2 \
 -numa node,nodeid=0 -numa node,nodeid=1

That means if there is a global mem-path option, we still have to look at the memory-backend objects that have been specified additionally and return their minimum page size if that value is smaller than the page size of the main memory. Reported-by: Greg Kurz <groug@kaod.org> Signed-off-by: Thomas Huth <thuth@redhat.com> Reviewed-by: Greg Kurz <groug@kaod.org> Tested-by: Greg Kurz <groug@kaod.org> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
This commit is contained in:
parent
2d2e632ad0
commit
3d4f253483
@ -366,10 +366,13 @@ static int find_max_supported_pagesize(Object *obj, void *opaque)
|
||||
static long getrampagesize(void)
|
||||
{
|
||||
long hpsize = LONG_MAX;
|
||||
long mainrampagesize;
|
||||
Object *memdev_root;
|
||||
|
||||
if (mem_path) {
|
||||
return gethugepagesize(mem_path);
|
||||
mainrampagesize = gethugepagesize(mem_path);
|
||||
} else {
|
||||
mainrampagesize = getpagesize();
|
||||
}
|
||||
|
||||
/* it's possible we have memory-backend objects with
|
||||
@ -383,28 +386,26 @@ static long getrampagesize(void)
|
||||
* backend isn't backed by hugepages.
|
||||
*/
|
||||
memdev_root = object_resolve_path("/objects", NULL);
|
||||
if (!memdev_root) {
|
||||
return getpagesize();
|
||||
if (memdev_root) {
|
||||
object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
|
||||
}
|
||||
|
||||
object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
|
||||
|
||||
if (hpsize == LONG_MAX || hpsize == getpagesize()) {
|
||||
return getpagesize();
|
||||
if (hpsize == LONG_MAX) {
|
||||
/* No additional memory regions found ==> Report main RAM page size */
|
||||
return mainrampagesize;
|
||||
}
|
||||
|
||||
/* If NUMA is disabled or the NUMA nodes are not backed with a
|
||||
* memory-backend, then there is at least one node using "normal"
|
||||
* RAM. And since normal RAM has not been configured with "-mem-path"
|
||||
* (what we've checked earlier here already), we can not use huge pages!
|
||||
* memory-backend, then there is at least one node using "normal" RAM,
|
||||
* so if its page size is smaller we have got to report that size instead.
|
||||
*/
|
||||
if (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL) {
|
||||
if (hpsize > mainrampagesize &&
|
||||
(nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
|
||||
static bool warned;
|
||||
if (!warned) {
|
||||
error_report("Huge page support disabled (n/a for main memory).");
|
||||
warned = true;
|
||||
}
|
||||
return getpagesize();
|
||||
return mainrampagesize;
|
||||
}
|
||||
|
||||
return hpsize;
|
||||
|
Loading…
Reference in New Issue
Block a user