Kernel.Vmm: Handle sparse physical memory usage + other fixes (#3932)
* Initial work
* Bug fixing deadlocks and broken unmaps
* Fix more bugs
  broken memory pools
* More bug fixing
  Still plenty more to fix though
* Even more bug fixing
  Finally got Final Fantasy XV back to running, haven't found anymore bugs yet.
* More bugfixing
* Update memory.cpp
* Rewrite start
* Fix for oversized unmaps
* Oops
* Update address_space.cpp
* Clang
* Mac fix?
* Track VMA physical areas based on start in VMA
  Allows me to simplify some logic, and should (finally) allow merging VMAs in memory code.
* Merge VMAs, fix some bugs
  Finally possible thanks to address space + phys tracking changes
* Clang
* Oops
* Oops2
* Oops3
* Bugfixing
* SDK check for coalescing
  Just to rule out any issues from games that wouldn't see coalescing in the first place.
* More ReleaseDirectMemory fixes
  I really suck at logic some days
* Merge physical areas within VMAs
  In games that perform a lot of similar mappings, you can wind up with 1000+ phys areas in one vma. This should reduce some of the overhead that might cause.
* Hopefully fix Mac compile
  Why must their uint64_t be different?
* Mac pt.2
  Oops
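For context when reading the Windows changes in address_space.cpp below: the region bookkeeping there rests on three placeholder operations: splitting a placeholder (VirtualFreeEx with MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER), mapping a section view over an exact-sized placeholder (MapViewOfFile3 with MEM_REPLACE_PLACEHOLDER, undone by UnmapViewOfFile2 with MEM_PRESERVE_PLACEHOLDER), and re-joining adjacent free placeholders (VirtualFreeEx with MEM_COALESCE_PLACEHOLDERS). The standalone sketch below illustrates only those OS semantics; the sizes, the single hard-coded mapping and the build line are invented for the example, and it is not code from this PR.

```cpp
// Standalone illustration of the Windows placeholder primitives (not shadPS4
// code). Requires Windows 10 1803+; assumed build line, e.g.:
//   cl /EHsc /std:c++17 placeholder_demo.cpp onecore.lib
#define _WIN32_WINNT 0x0A00 // make sure the Windows 10 memory APIs are declared
#include <windows.h>
#include <cstdio>

int main() {
    const SIZE_T page = 0x1000;
    const SIZE_T total = 4 * page;

    // Reserve the whole range as one placeholder; nothing is accessible yet.
    void* base = VirtualAlloc2(GetCurrentProcess(), nullptr, total,
                               MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS,
                               nullptr, 0);
    if (!base) {
        std::printf("reserve failed: %lu\n", GetLastError());
        return 1;
    }

    // Split: carve the first page out as its own placeholder.
    if (!VirtualFree(base, page, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
        std::printf("split failed: %lu\n", GetLastError());
        return 1;
    }

    // Map a pagefile-backed section view over the exact-sized placeholder.
    HANDLE section = CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE,
                                        0, static_cast<DWORD>(page), nullptr);
    void* view = MapViewOfFile3(section, GetCurrentProcess(), base, 0, page,
                                MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0);
    if (!view) {
        std::printf("map failed: %lu\n", GetLastError());
        return 1;
    }
    static_cast<unsigned char*>(view)[0] = 42; // the view is now usable memory

    // Unmap but keep the placeholder, then coalesce it back with its free neighbor.
    UnmapViewOfFile2(GetCurrentProcess(), view, MEM_PRESERVE_PLACEHOLDER);
    if (!VirtualFree(base, total, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
        std::printf("coalesce failed: %lu\n", GetLastError());
        return 1;
    }

    // Release the (again contiguous) placeholder and clean up.
    VirtualFree(base, 0, MEM_RELEASE);
    CloseHandle(section);
    return 0;
}
```

The diff's MapRegion/UnmapRegion/SplitRegion/CoalesceFreeRegions follow the same shape: every mapping gets an exact-sized placeholder carved for it, and every unmap is followed by a coalescing pass over the neighboring free placeholders.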
@@ -93,7 +93,10 @@ static u64 BackingSize = ORBIS_KERNEL_TOTAL_MEM_DEV_PRO;

struct MemoryRegion {
VAddr base;
size_t size;
PAddr phys_base;
u64 size;
u32 prot;
s32 fd;
bool is_mapped;
};

@@ -159,7 +162,8 @@ struct AddressSpace::Impl {
// Restrict region size to avoid overly fragmenting the virtual memory space.
if (info.State == MEM_FREE && info.RegionSize > 0x1000000) {
VAddr addr = Common::AlignUp(reinterpret_cast<VAddr>(info.BaseAddress), alignment);
regions.emplace(addr, MemoryRegion{addr, size, false});
regions.emplace(addr,
MemoryRegion{addr, PAddr(-1), size, PAGE_NOACCESS, -1, false});
}
}

@@ -207,29 +211,32 @@ struct AddressSpace::Impl {
~Impl() {
if (virtual_base) {
if (!VirtualFree(virtual_base, 0, MEM_RELEASE)) {
LOG_CRITICAL(Render, "Failed to free virtual memory");
LOG_CRITICAL(Core, "Failed to free virtual memory");
}
}
if (backing_base) {
if (!UnmapViewOfFile2(process, backing_base, MEM_PRESERVE_PLACEHOLDER)) {
LOG_CRITICAL(Render, "Failed to unmap backing memory placeholder");
LOG_CRITICAL(Core, "Failed to unmap backing memory placeholder");
}
if (!VirtualFreeEx(process, backing_base, 0, MEM_RELEASE)) {
LOG_CRITICAL(Render, "Failed to free backing memory");
LOG_CRITICAL(Core, "Failed to free backing memory");
}
}
if (!CloseHandle(backing_handle)) {
LOG_CRITICAL(Render, "Failed to free backing memory file handle");
LOG_CRITICAL(Core, "Failed to free backing memory file handle");
}
}

void* Map(VAddr virtual_addr, PAddr phys_addr, size_t size, ULONG prot, uintptr_t fd = 0) {
// Before mapping we must carve a placeholder with the exact properties of our mapping.
auto* region = EnsureSplitRegionForMapping(virtual_addr, size);
region->is_mapped = true;
void* MapRegion(MemoryRegion* region) {
VAddr virtual_addr = region->base;
PAddr phys_addr = region->phys_base;
u64 size = region->size;
ULONG prot = region->prot;
s32 fd = region->fd;

void* ptr = nullptr;
if (phys_addr != -1) {
HANDLE backing = fd ? reinterpret_cast<HANDLE>(fd) : backing_handle;
HANDLE backing = fd != -1 ? reinterpret_cast<HANDLE>(fd) : backing_handle;
if (fd && prot == PAGE_READONLY) {
DWORD resultvar;
ptr = VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), size,
@@ -257,110 +264,136 @@ struct AddressSpace::Impl {
return ptr;
}

void Unmap(VAddr virtual_addr, size_t size, bool has_backing) {
bool ret;
if (has_backing) {
void UnmapRegion(MemoryRegion* region) {
VAddr virtual_addr = region->base;
PAddr phys_base = region->phys_base;
u64 size = region->size;

bool ret = false;
if (phys_base != -1) {
ret = UnmapViewOfFile2(process, reinterpret_cast<PVOID>(virtual_addr),
MEM_PRESERVE_PLACEHOLDER);
} else {
ret = VirtualFreeEx(process, reinterpret_cast<PVOID>(virtual_addr), size,
MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}
ASSERT_MSG(ret, "Unmap operation on virtual_addr={:#X} failed: {}", virtual_addr,
ASSERT_MSG(ret, "Unmap on virtual_addr {:#x}, size {:#x} failed: {}", virtual_addr, size,
Common::GetLastErrorMsg());

// The unmap call will create a new placeholder region. We need to see if we can coalesce it
// with neighbors.
JoinRegionsAfterUnmap(virtual_addr, size);
}

// The following code is inspired from Dolphin's MemArena
// https://github.com/dolphin-emu/dolphin/blob/deee3ee4/Source/Core/Common/MemArenaWin.cpp#L212
MemoryRegion* EnsureSplitRegionForMapping(VAddr address, size_t size) {
// Find closest region that is <= the given address by using upper bound and decrementing
auto it = regions.upper_bound(address);
ASSERT_MSG(it != regions.begin(), "Invalid address {:#x}", address);
--it;
ASSERT_MSG(!it->second.is_mapped,
"Attempt to map {:#x} with size {:#x} which overlaps with {:#x} mapping",
address, size, it->second.base);
auto& [base, region] = *it;
void SplitRegion(VAddr virtual_addr, u64 size) {
// First, get the region this range covers
auto it = std::prev(regions.upper_bound(virtual_addr));

const VAddr mapping_address = region.base;
const size_t region_size = region.size;
if (mapping_address == address) {
// If this region is already split up correctly we don't have to do anything
if (region_size == size) {
return &region;
// All unmapped areas will coalesce, so there should be a region
// containing the full requested range. If not, then something is mapped here.
ASSERT_MSG(it->second.base + it->second.size >= virtual_addr + size,
"Cannot fit region into one placeholder");

// If the region is mapped, we need to unmap first before we can modify the placeholders.
if (it->second.is_mapped) {
ASSERT_MSG(it->second.phys_base != -1 || !it->second.is_mapped,
"Cannot split unbacked mapping");
UnmapRegion(&it->second);
}

// We need to split this region to create a matching placeholder.
if (it->second.base != virtual_addr) {
// Requested address is not the start of the containing region,
// create a new region to represent the memory before the requested range.
auto& region = it->second;
u64 base_offset = virtual_addr - region.base;
u64 next_region_size = region.size - base_offset;
PAddr next_region_phys_base = -1;
if (region.is_mapped) {
next_region_phys_base = region.phys_base + base_offset;
}
region.size = base_offset;

ASSERT_MSG(region_size >= size,
"Region with address {:#x} and size {:#x} can't fit {:#x}", mapping_address,
region_size, size);

// Split the placeholder.
if (!VirtualFreeEx(process, LPVOID(address), size,
// Use VirtualFreeEx to create the split.
if (!VirtualFreeEx(process, LPVOID(region.base), region.size,
MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
UNREACHABLE_MSG("Region splitting failed: {}", Common::GetLastErrorMsg());
return nullptr;
}

// Update tracked mappings and return the first of the two
// If the mapping was mapped, remap the region.
if (region.is_mapped) {
MapRegion(&region);
}

// Store a new region matching the removed area.
it = regions.emplace_hint(std::next(it), virtual_addr,
MemoryRegion(virtual_addr, next_region_phys_base,
next_region_size, region.prot, region.fd,
region.is_mapped));
}

// At this point, the region's base will match virtual_addr.
// Now check for a size difference.
if (it->second.size != size) {
// The requested size is smaller than the current region placeholder.
// Update region to match the requested region,
// then make a new region to represent the remaining space.
auto& region = it->second;
VAddr next_region_addr = region.base + size;
u64 next_region_size = region.size - size;
PAddr next_region_phys_base = -1;
if (region.is_mapped) {
next_region_phys_base = region.phys_base + size;
}
region.size = size;
const VAddr new_mapping_start = address + size;
regions.emplace_hint(std::next(it), new_mapping_start,
MemoryRegion(new_mapping_start, region_size - size, false));
return &region;

// Store the new region matching the remaining space
regions.emplace_hint(std::next(it), next_region_addr,
MemoryRegion(next_region_addr, next_region_phys_base,
next_region_size, region.prot, region.fd,
region.is_mapped));

// Use VirtualFreeEx to create the split.
if (!VirtualFreeEx(process, LPVOID(region.base), region.size,
MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
UNREACHABLE_MSG("Region splitting failed: {}", Common::GetLastErrorMsg());
}

// If these regions were mapped, then map the unmapped area beyond the requested range.
if (region.is_mapped) {
MapRegion(&std::next(it)->second);
}
}

ASSERT(mapping_address < address);

// Is there enough space to map this?
const size_t offset_in_region = address - mapping_address;
const size_t minimum_size = size + offset_in_region;
ASSERT(region_size >= minimum_size);

// Split the placeholder.
if (!VirtualFreeEx(process, LPVOID(address), size,
MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
UNREACHABLE_MSG("Region splitting failed: {}", Common::GetLastErrorMsg());
return nullptr;
}

// Do we now have two regions or three regions?
if (region_size == minimum_size) {
// Split into two; update tracked mappings and return the second one
region.size = offset_in_region;
it = regions.emplace_hint(std::next(it), address, MemoryRegion(address, size, false));
return &it->second;
} else {
// Split into three; update tracked mappings and return the middle one
region.size = offset_in_region;
const VAddr middle_mapping_start = address;
const size_t middle_mapping_size = size;
const VAddr after_mapping_start = address + size;
const size_t after_mapping_size = region_size - minimum_size;
it = regions.emplace_hint(std::next(it), after_mapping_start,
MemoryRegion(after_mapping_start, after_mapping_size, false));
it = regions.emplace_hint(
it, middle_mapping_start,
MemoryRegion(middle_mapping_start, middle_mapping_size, false));
return &it->second;
// If the requested region was mapped, remap it.
if (it->second.is_mapped) {
MapRegion(&it->second);
}
}

void JoinRegionsAfterUnmap(VAddr address, size_t size) {
// There should be a mapping that matches the request exactly, find it
auto it = regions.find(address);
ASSERT_MSG(it != regions.end() && it->second.size == size,
"Invalid address/size given to unmap.");
void* Map(VAddr virtual_addr, PAddr phys_addr, u64 size, ULONG prot, s32 fd = -1) {
// Split surrounding regions to create a placeholder
SplitRegion(virtual_addr, size);

// Get the region this range covers
auto it = std::prev(regions.upper_bound(virtual_addr));
auto& [base, region] = *it;
region.is_mapped = false;

ASSERT_MSG(!region.is_mapped, "Cannot overwrite mapped region");

// Now we have a region matching the requested region, perform the actual mapping.
region.is_mapped = true;
region.phys_base = phys_addr;
region.prot = prot;
region.fd = fd;
return MapRegion(&region);
}

void CoalesceFreeRegions(VAddr virtual_addr) {
// First, get the region to update
auto it = std::prev(regions.upper_bound(virtual_addr));
ASSERT_MSG(!it->second.is_mapped, "Cannot coalesce mapped regions");

// Check if a placeholder exists right before us.
auto it_prev = it != regions.begin() ? std::prev(it) : regions.end();
if (it_prev != regions.end() && !it_prev->second.is_mapped) {
const size_t total_size = it_prev->second.size + size;
const u64 total_size = it_prev->second.size + it->second.size;
if (!VirtualFreeEx(process, LPVOID(it_prev->first), total_size,
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
UNREACHABLE_MSG("Region coalescing failed: {}", Common::GetLastErrorMsg());
@@ -374,7 +407,7 @@ struct AddressSpace::Impl {
// Check if a placeholder exists right after us.
auto it_next = std::next(it);
if (it_next != regions.end() && !it_next->second.is_mapped) {
const size_t total_size = it->second.size + it_next->second.size;
const u64 total_size = it->second.size + it_next->second.size;
if (!VirtualFreeEx(process, LPVOID(it->first), total_size,
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
UNREACHABLE_MSG("Region coalescing failed: {}", Common::GetLastErrorMsg());
@@ -385,7 +418,47 @@ struct AddressSpace::Impl {
}
}

void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
void Unmap(VAddr virtual_addr, u64 size) {
// Loop through all regions in the requested range
u64 remaining_size = size;
VAddr current_addr = virtual_addr;
while (remaining_size > 0) {
// Get the region containing our current address.
auto it = std::prev(regions.upper_bound(current_addr));

// If necessary, split regions to ensure a valid unmap.
// To prevent complication, ensure size is within the bounds of the current region.
u64 base_offset = current_addr - it->second.base;
u64 size_to_unmap = std::min<u64>(it->second.size - base_offset, remaining_size);
if (current_addr != it->second.base || size_to_unmap != it->second.size) {
SplitRegion(current_addr, size_to_unmap);
}

// Repair the region pointer, as SplitRegion modifies the regions map.
it = std::prev(regions.upper_bound(current_addr));
auto& [base, region] = *it;

// Unmap the region if it was previously mapped
if (region.is_mapped) {
UnmapRegion(&region);
}

// Update region data
region.is_mapped = false;
region.fd = -1;
region.phys_base = -1;
region.prot = PAGE_NOACCESS;

// Coalesce any free space
CoalesceFreeRegions(current_addr);

// Update loop variables
remaining_size -= size_to_unmap;
current_addr += size_to_unmap;
}
}

void Protect(VAddr virtual_addr, u64 size, bool read, bool write, bool execute) {
DWORD new_flags{};

if (write && !read) {
@@ -415,7 +488,7 @@ struct AddressSpace::Impl {

// If no flags are assigned, then something's gone wrong.
if (new_flags == 0) {
LOG_CRITICAL(Common_Memory,
LOG_CRITICAL(Core,
"Unsupported protection flag combination for address {:#x}, size {}, "
"read={}, write={}, execute={}",
virtual_addr, size, read, write, execute);
@@ -429,8 +502,8 @@ struct AddressSpace::Impl {
continue;
}
const auto& region = it->second;
const size_t range_addr = std::max(region.base, virtual_addr);
const size_t range_size = std::min(region.base + region.size, virtual_end) - range_addr;
const u64 range_addr = std::max(region.base, virtual_addr);
const u64 range_size = std::min(region.base + region.size, virtual_end) - range_addr;
DWORD old_flags{};
if (!VirtualProtectEx(process, LPVOID(range_addr), range_size, new_flags, &old_flags)) {
UNREACHABLE_MSG(
@@ -453,11 +526,11 @@ struct AddressSpace::Impl {
u8* backing_base{};
u8* virtual_base{};
u8* system_managed_base{};
size_t system_managed_size{};
u64 system_managed_size{};
u8* system_reserved_base{};
size_t system_reserved_size{};
u64 system_reserved_size{};
u8* user_base{};
size_t user_size{};
u64 user_size{};
std::map<VAddr, MemoryRegion> regions;
};
#else
@@ -601,7 +674,7 @@ struct AddressSpace::Impl {
}
}

void* Map(VAddr virtual_addr, PAddr phys_addr, size_t size, PosixPageProtection prot,
void* Map(VAddr virtual_addr, PAddr phys_addr, u64 size, PosixPageProtection prot,
int fd = -1) {
m_free_regions.subtract({virtual_addr, virtual_addr + size});
const int handle = phys_addr != -1 ? (fd == -1 ? backing_fd : fd) : -1;
@@ -613,10 +686,10 @@ struct AddressSpace::Impl {
return ret;
}

void Unmap(VAddr virtual_addr, size_t size, bool) {
void Unmap(VAddr virtual_addr, u64 size, bool) {
// Check to see if we are adjacent to any regions.
auto start_address = virtual_addr;
auto end_address = start_address + size;
VAddr start_address = virtual_addr;
VAddr end_address = start_address + size;
auto it = m_free_regions.find({start_address - 1, end_address + 1});

// If we are, join with them, ensuring we stay in bounds.
@@ -634,7 +707,7 @@ struct AddressSpace::Impl {
ASSERT_MSG(ret != MAP_FAILED, "mmap failed: {}", strerror(errno));
}

void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
void Protect(VAddr virtual_addr, u64 size, bool read, bool write, bool execute) {
int flags = PROT_NONE;
if (read) {
flags |= PROT_READ;
@@ -654,11 +727,11 @@ struct AddressSpace::Impl {
int backing_fd;
u8* backing_base{};
u8* system_managed_base{};
size_t system_managed_size{};
u64 system_managed_size{};
u8* system_reserved_base{};
size_t system_reserved_size{};
u64 system_reserved_size{};
u8* user_base{};
size_t user_size{};
u64 user_size{};
boost::icl::interval_set<VAddr> m_free_regions;
};
#endif
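On the non-Windows path just above, the free virtual-address ranges live in a boost::icl::interval_set: Map() subtracts the mapped range from the set and Unmap() adds it back, relying on the container joining touching intervals. A rough, standalone sketch of that container behaviour only (the addresses are arbitrary example values, not taken from the emulator):

```cpp
// Demonstrates boost::icl::interval_set bookkeeping for free address ranges:
// mapping subtracts a range, unmapping adds it back and coalesces with the
// free neighbor automatically.
#include <boost/icl/interval_set.hpp>
#include <cstdint>
#include <iostream>

using VAddr = std::uint64_t;
using Interval = boost::icl::interval<VAddr>;

int main() {
    boost::icl::interval_set<VAddr> free_regions;
    free_regions += Interval::right_open(0x1000'0000, 0x2000'0000); // initial free span

    // "Map" 0x1000 bytes: the range leaves the free set.
    free_regions.subtract(Interval::right_open(0x1000'0000, 0x1000'1000));

    // "Unmap" it again: adding it back merges with the adjacent free interval.
    free_regions += Interval::right_open(0x1000'0000, 0x1000'1000);

    for (const auto& iv : free_regions) {
        std::cout << std::hex << iv.lower() << " - " << iv.upper() << '\n';
    }
    return 0; // prints a single coalesced interval: 10000000 - 20000000
}
```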
@@ -675,8 +748,7 @@ AddressSpace::AddressSpace() : impl{std::make_unique<Impl>()} {

AddressSpace::~AddressSpace() = default;

void* AddressSpace::Map(VAddr virtual_addr, size_t size, u64 alignment, PAddr phys_addr,
bool is_exec) {
void* AddressSpace::Map(VAddr virtual_addr, u64 size, PAddr phys_addr, bool is_exec) {
#if ARCH_X86_64
const auto prot = is_exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
#else
@@ -687,8 +759,7 @@ void* AddressSpace::Map(VAddr virtual_addr, size_t size, u64 alignment, PAddr ph
return impl->Map(virtual_addr, phys_addr, size, prot);
}

void* AddressSpace::MapFile(VAddr virtual_addr, size_t size, size_t offset, u32 prot,
uintptr_t fd) {
void* AddressSpace::MapFile(VAddr virtual_addr, u64 size, u64 offset, u32 prot, uintptr_t fd) {
#ifdef _WIN32
return impl->Map(virtual_addr, offset, size,
ToWindowsProt(std::bit_cast<Core::MemoryProt>(prot)), fd);
@@ -698,31 +769,15 @@ void* AddressSpace::MapFile(VAddr virtual_addr, size_t size, size_t offset, u32
#endif
}

void AddressSpace::Unmap(VAddr virtual_addr, size_t size, VAddr start_in_vma, VAddr end_in_vma,
PAddr phys_base, bool is_exec, bool has_backing, bool readonly_file) {
void AddressSpace::Unmap(VAddr virtual_addr, u64 size, bool has_backing) {
#ifdef _WIN32
// There does not appear to be comparable support for partial unmapping on Windows.
// Unfortunately, a least one title was found to require this. The workaround is to unmap
// the entire allocation and remap the portions outside of the requested unmapping range.
impl->Unmap(virtual_addr, size, has_backing && !readonly_file);

// TODO: Determine if any titles require partial unmapping support for un-backed allocations.
ASSERT_MSG(has_backing || (start_in_vma == 0 && end_in_vma == size),
"Partial unmapping of un-backed allocations is not supported");

if (start_in_vma != 0) {
Map(virtual_addr, start_in_vma, 0, phys_base, is_exec);
}

if (end_in_vma != size) {
Map(virtual_addr + end_in_vma, size - end_in_vma, 0, phys_base + end_in_vma, is_exec);
}
impl->Unmap(virtual_addr, size);
#else
impl->Unmap(virtual_addr + start_in_vma, end_in_vma - start_in_vma, has_backing);
impl->Unmap(virtual_addr, size, has_backing);
#endif
}

void AddressSpace::Protect(VAddr virtual_addr, size_t size, MemoryPermission perms) {
void AddressSpace::Protect(VAddr virtual_addr, u64 size, MemoryPermission perms) {
const bool read = True(perms & MemoryPermission::Read);
const bool write = True(perms & MemoryPermission::Write);
const bool execute = True(perms & MemoryPermission::Execute);

@@ -39,7 +39,7 @@ public:
[[nodiscard]] const u8* SystemManagedVirtualBase() const noexcept {
return system_managed_base;
}
[[nodiscard]] size_t SystemManagedVirtualSize() const noexcept {
[[nodiscard]] u64 SystemManagedVirtualSize() const noexcept {
return system_managed_size;
}

@@ -49,7 +49,7 @@ public:
[[nodiscard]] const u8* SystemReservedVirtualBase() const noexcept {
return system_reserved_base;
}
[[nodiscard]] size_t SystemReservedVirtualSize() const noexcept {
[[nodiscard]] u64 SystemReservedVirtualSize() const noexcept {
return system_reserved_size;
}

@@ -59,7 +59,7 @@ public:
[[nodiscard]] const u8* UserVirtualBase() const noexcept {
return user_base;
}
[[nodiscard]] size_t UserVirtualSize() const noexcept {
[[nodiscard]] u64 UserVirtualSize() const noexcept {
return user_size;
}

@@ -73,17 +73,15 @@ public:
* If zero is provided the mapping is considered as private.
* @return A pointer to the mapped memory.
*/
void* Map(VAddr virtual_addr, size_t size, u64 alignment = 0, PAddr phys_addr = -1,
bool exec = false);
void* Map(VAddr virtual_addr, u64 size, PAddr phys_addr = -1, bool exec = false);

/// Memory maps a specified file descriptor.
void* MapFile(VAddr virtual_addr, size_t size, size_t offset, u32 prot, uintptr_t fd);
void* MapFile(VAddr virtual_addr, u64 size, u64 offset, u32 prot, uintptr_t fd);

/// Unmaps specified virtual memory area.
void Unmap(VAddr virtual_addr, size_t size, VAddr start_in_vma, VAddr end_in_vma,
PAddr phys_base, bool is_exec, bool has_backing, bool readonly_file);
void Unmap(VAddr virtual_addr, u64 size, bool has_backing);

void Protect(VAddr virtual_addr, size_t size, MemoryPermission perms);
void Protect(VAddr virtual_addr, u64 size, MemoryPermission perms);

// Returns an interval set containing all usable regions.
boost::icl::interval_set<VAddr> GetUsableRegions();
@@ -93,11 +91,11 @@ private:
std::unique_ptr<Impl> impl;
u8* backing_base{};
u8* system_managed_base{};
size_t system_managed_size{};
u64 system_managed_size{};
u8* system_reserved_base{};
size_t system_reserved_size{};
u64 system_reserved_size{};
u8* user_base{};
size_t user_size{};
u64 user_size{};
};

} // namespace Core

@@ -32,7 +32,7 @@ bool MemoryMapViewer::Iterator::DrawLine() {
TableNextColumn();
Text("%s", magic_enum::enum_name(m.prot).data());
TableNextColumn();
if (m.is_exec) {
if (True(m.prot & MemoryProt::CpuExec)) {
Text("X");
}
TableNextColumn();
@@ -44,7 +44,7 @@ bool MemoryMapViewer::Iterator::DrawLine() {
return false;
}
auto m = dmem.it->second;
if (m.dma_type == DMAType::Free) {
if (m.dma_type == PhysicalMemoryType::Free) {
++dmem.it;
return DrawLine();
}
@@ -56,7 +56,8 @@ bool MemoryMapViewer::Iterator::DrawLine() {
auto type = static_cast<::Libraries::Kernel::MemoryTypes>(m.memory_type);
Text("%s", magic_enum::enum_name(type).data());
TableNextColumn();
Text("%d", m.dma_type == DMAType::Pooled || m.dma_type == DMAType::Committed);
Text("%d",
m.dma_type == PhysicalMemoryType::Pooled || m.dma_type == PhysicalMemoryType::Committed);
++dmem.it;
return true;
}

@@ -11,8 +11,8 @@ class MemoryMapViewer {
struct Iterator {
bool is_vma;
struct {
MemoryManager::DMemMap::iterator it;
MemoryManager::DMemMap::iterator end;
MemoryManager::PhysMap::iterator it;
MemoryManager::PhysMap::iterator end;
} dmem;
struct {
MemoryManager::VMAMap::iterator it;

@@ -102,6 +102,7 @@ s32 PS4_SYSV_ABI sceKernelReleaseDirectMemory(u64 start, u64 len) {
if (len == 0) {
return ORBIS_OK;
}
LOG_INFO(Kernel_Vmm, "called start = {:#x}, len = {:#x}", start, len);
auto* memory = Core::Memory::Instance();
memory->Free(start, len);
return ORBIS_OK;

File diff suppressed because it is too large
@@ -8,6 +8,7 @@
#include <string>
#include <string_view>
#include "common/enum.h"
#include "common/shared_first_mutex.h"
#include "common/singleton.h"
#include "common/types.h"
#include "core/address_space.h"
@@ -54,12 +55,37 @@ enum class MemoryMapFlags : u32 {
};
DECLARE_ENUM_FLAG_OPERATORS(MemoryMapFlags)

enum class DMAType : u32 {
enum class PhysicalMemoryType : u32 {
Free = 0,
Allocated = 1,
Mapped = 2,
Pooled = 3,
Committed = 4,
Flexible = 5,
};

struct PhysicalMemoryArea {
PAddr base = 0;
u64 size = 0;
s32 memory_type = 0;
PhysicalMemoryType dma_type = PhysicalMemoryType::Free;

PAddr GetEnd() const {
return base + size;
}

bool CanMergeWith(const PhysicalMemoryArea& next) const {
if (base + size != next.base) {
return false;
}
if (memory_type != next.memory_type) {
return false;
}
if (dma_type != next.dma_type) {
return false;
}
return true;
}
};

enum class VMAType : u32 {
@@ -74,60 +100,15 @@ enum class VMAType : u32 {
File = 8,
};

struct DirectMemoryArea {
PAddr base = 0;
u64 size = 0;
s32 memory_type = 0;
DMAType dma_type = DMAType::Free;

PAddr GetEnd() const {
return base + size;
}

bool CanMergeWith(const DirectMemoryArea& next) const {
if (base + size != next.base) {
return false;
}
if (memory_type != next.memory_type) {
return false;
}
if (dma_type != next.dma_type) {
return false;
}
return true;
}
};

struct FlexibleMemoryArea {
PAddr base = 0;
u64 size = 0;
bool is_free = true;

PAddr GetEnd() const {
return base + size;
}

bool CanMergeWith(const FlexibleMemoryArea& next) const {
if (base + size != next.base) {
return false;
}
if (is_free != next.is_free) {
return false;
}
return true;
}
};

struct VirtualMemoryArea {
VAddr base = 0;
u64 size = 0;
PAddr phys_base = 0;
std::map<uintptr_t, PhysicalMemoryArea> phys_areas;
VMAType type = VMAType::Free;
MemoryProt prot = MemoryProt::NoAccess;
bool disallow_merge = false;
std::string name = "";
uintptr_t fd = 0;
bool is_exec = false;
s32 fd = 0;
bool disallow_merge = false;

bool Contains(VAddr addr, u64 size) const {
return addr >= base && (addr + size) <= (base + this->size);
@@ -141,30 +122,32 @@ struct VirtualMemoryArea {
return type != VMAType::Free && type != VMAType::Reserved && type != VMAType::PoolReserved;
}

bool CanMergeWith(const VirtualMemoryArea& next) const {
bool CanMergeWith(VirtualMemoryArea& next) {
if (disallow_merge || next.disallow_merge) {
return false;
}
if (base + size != next.base) {
return false;
}
if ((type == VMAType::Direct || type == VMAType::Flexible || type == VMAType::Pooled) &&
phys_base + size != next.phys_base) {
return false;
if (type == VMAType::Direct && next.type == VMAType::Direct) {
auto& last_phys = std::prev(phys_areas.end())->second;
auto& first_next_phys = next.phys_areas.begin()->second;
if (last_phys.base + last_phys.size != first_next_phys.base ||
last_phys.memory_type != first_next_phys.memory_type) {
return false;
}
}
if (prot != next.prot || type != next.type) {
return false;
}

return true;
}
};

class MemoryManager {
using DMemMap = std::map<PAddr, DirectMemoryArea>;
using DMemHandle = DMemMap::iterator;

using FMemMap = std::map<PAddr, FlexibleMemoryArea>;
using FMemHandle = FMemMap::iterator;
using PhysMap = std::map<PAddr, PhysicalMemoryArea>;
using PhysHandle = PhysMap::iterator;

using VMAMap = std::map<VAddr, VirtualMemoryArea>;
using VMAHandle = VMAMap::iterator;
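As a toy model of what the per-VMA physical tracking above enables (simplified, invented types; the real VirtualMemoryArea and PhysicalMemoryArea carry more state): when each VMA keys its physical areas by their start within the VMA (an offset in this toy model), two virtually adjacent VMAs can merge even if their physical runs are unrelated, since the second VMA's areas are just re-keyed and spliced in rather than requiring phys_base continuity:

```cpp
// Toy model only: shows how a per-VMA map of physical areas lets adjacent
// virtual areas merge without a single contiguous physical run. Field names
// and values are invented for the example.
#include <cstdint>
#include <cstdio>
#include <map>

using VAddr = std::uint64_t;
using PAddr = std::uint64_t;

struct PhysArea {
    PAddr base;
    std::uint64_t size;
};

struct Vma {
    VAddr base;
    std::uint64_t size;
    std::map<std::uint64_t, PhysArea> phys_areas; // keyed by offset from `base`
};

// Merge a virtually adjacent `next` into `vma`: re-key its physical areas by
// the distance between the two bases, splice them in, then grow the VMA.
void MergeInto(Vma& vma, const Vma& next) {
    const std::uint64_t shift = next.base - vma.base; // equals vma.size when adjacent
    for (const auto& [offset, area] : next.phys_areas) {
        vma.phys_areas.emplace(offset + shift, area);
    }
    vma.size += next.size;
}

int main() {
    Vma a{0x10000, 0x4000, {{0x0, PhysArea{0x200000, 0x4000}}}};
    Vma b{0x14000, 0x2000, {{0x0, PhysArea{0x900000, 0x2000}}}}; // unrelated physical run
    MergeInto(a, b);
    for (const auto& [offset, area] : a.phys_areas) {
        std::printf("va 0x%llx -> pa 0x%llx (0x%llx bytes)\n",
                    static_cast<unsigned long long>(a.base + offset),
                    static_cast<unsigned long long>(area.base),
                    static_cast<unsigned long long>(area.size));
    }
    return 0;
}
```

This matches the CanMergeWith change above, which for Direct VMAs only compares the last physical area of one VMA against the first of the next instead of requiring phys_base continuity over the whole range.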
@@ -220,10 +203,11 @@ public:
// Now make sure the full address range is contained in vma_map.
auto vma_handle = FindVMA(virtual_addr);
auto addr_to_check = virtual_addr;
s64 size_to_validate = size;
u64 size_to_validate = size;
while (vma_handle != vma_map.end() && size_to_validate > 0) {
const auto offset_in_vma = addr_to_check - vma_handle->second.base;
const auto size_in_vma = vma_handle->second.size - offset_in_vma;
const auto size_in_vma =
std::min<u64>(vma_handle->second.size - offset_in_vma, size_to_validate);
size_to_validate -= size_in_vma;
addr_to_check += size_in_vma;
vma_handle++;
@@ -245,7 +229,7 @@ public:

void CopySparseMemory(VAddr source, u8* dest, u64 size);

bool TryWriteBacking(void* address, const void* data, u32 num_bytes);
bool TryWriteBacking(void* address, const void* data, u64 size);

void SetupMemoryRegions(u64 flexible_size, bool use_extended_mem1, bool use_extended_mem2);

@@ -300,34 +284,14 @@ private:
return std::prev(vma_map.upper_bound(target));
}

DMemHandle FindDmemArea(PAddr target) {
PhysHandle FindDmemArea(PAddr target) {
return std::prev(dmem_map.upper_bound(target));
}

FMemHandle FindFmemArea(PAddr target) {
PhysHandle FindFmemArea(PAddr target) {
return std::prev(fmem_map.upper_bound(target));
}

template <typename Handle>
Handle MergeAdjacent(auto& handle_map, Handle iter) {
const auto next_vma = std::next(iter);
if (next_vma != handle_map.end() && iter->second.CanMergeWith(next_vma->second)) {
iter->second.size += next_vma->second.size;
handle_map.erase(next_vma);
}

if (iter != handle_map.begin()) {
auto prev_vma = std::prev(iter);
if (prev_vma->second.CanMergeWith(iter->second)) {
prev_vma->second.size += iter->second.size;
handle_map.erase(iter);
iter = prev_vma;
}
}

return iter;
}

bool HasPhysicalBacking(VirtualMemoryArea vma) {
return vma.type == VMAType::Direct || vma.type == VMAType::Flexible ||
vma.type == VMAType::Pooled;
@@ -335,17 +299,17 @@ private:

VAddr SearchFree(VAddr virtual_addr, u64 size, u32 alignment);

VMAHandle MergeAdjacent(VMAMap& map, VMAHandle iter);

PhysHandle MergeAdjacent(PhysMap& map, PhysHandle iter);

VMAHandle CarveVMA(VAddr virtual_addr, u64 size);

DMemHandle CarveDmemArea(PAddr addr, u64 size);

FMemHandle CarveFmemArea(PAddr addr, u64 size);
PhysHandle CarvePhysArea(PhysMap& map, PAddr addr, u64 size);

VMAHandle Split(VMAHandle vma_handle, u64 offset_in_vma);

DMemHandle Split(DMemHandle dmem_handle, u64 offset_in_area);

FMemHandle Split(FMemHandle fmem_handle, u64 offset_in_area);
PhysHandle Split(PhysMap& map, PhysHandle dmem_handle, u64 offset_in_area);

u64 UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma_base, u64 size);

@@ -353,14 +317,15 @@ private:

private:
AddressSpace impl;
DMemMap dmem_map;
FMemMap fmem_map;
PhysMap dmem_map;
PhysMap fmem_map;
VMAMap vma_map;
std::mutex mutex;
Common::SharedFirstMutex mutex{};
u64 total_direct_size{};
u64 total_flexible_size{};
u64 flexible_usage{};
u64 pool_budget{};
s32 sdk_version{};
Vulkan::Rasterizer* rasterizer{};

struct PrtArea {