Core: More memory hotfixes (#3954)

* Update memory.cpp

* Fix CoalesceFreeRegions to account for address space gaps

Fixes a regression in Saint's Row games.
Stephen Miller
2026-01-24 00:05:56 -06:00
committed by GitHub
parent 46a7c4e1f5
commit c8b45e5ebc
2 changed files with 8 additions and 15 deletions


@@ -399,10 +399,11 @@ struct AddressSpace::Impl {
     auto it = std::prev(regions.upper_bound(virtual_addr));
     ASSERT_MSG(!it->second.is_mapped, "Cannot coalesce mapped regions");
-    // Check if there are free placeholders before this area.
+    // Check if there are adjacent free placeholders before this area.
     bool can_coalesce = false;
     auto it_prev = it != regions.begin() ? std::prev(it) : regions.end();
-    while (it_prev != regions.end() && !it_prev->second.is_mapped) {
+    while (it_prev != regions.end() && !it_prev->second.is_mapped &&
+           it_prev->first + it_prev->second.size == it->first) {
         // If there is an earlier region, move our iterator to that and increase size.
         it_prev->second.size = it_prev->second.size + it->second.size;
         regions.erase(it);
@@ -415,9 +416,10 @@ struct AddressSpace::Impl {
         it_prev = it != regions.begin() ? std::prev(it) : regions.end();
     }
-    // Check if there are free placeholders after this area.
+    // Check if there are adjacent free placeholders after this area.
     auto it_next = std::next(it);
-    while (it_next != regions.end() && !it_next->second.is_mapped) {
+    while (it_next != regions.end() && !it_next->second.is_mapped &&
+           it->first + it->second.size == it_next->first) {
         // If there is a later region, increase our current region's size
         it->second.size = it->second.size + it_next->second.size;
         regions.erase(it_next);
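
The fix makes coalescing adjacency-aware: two free placeholders merge only when one ends exactly where the next begins, so gaps between separate address-space reservations survive. A minimal standalone sketch of the idea, using a simplified hypothetical Region type rather than the emulator's actual structures (it assumes addr falls inside an existing free region):

    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <map>

    struct Region {
        std::size_t size;
        bool is_mapped;
    };

    // Regions keyed by base address. Merge the free region containing `addr`
    // with neighboring free regions, but only when they are byte-adjacent:
    // merging across a hole in the reserved address space would produce a
    // placeholder that claims memory which was never reserved.
    void CoalesceFreeRegions(std::map<std::uintptr_t, Region>& regions, std::uintptr_t addr) {
        auto it = std::prev(regions.upper_bound(addr));

        // Merge backwards while the previous region is free and ends exactly
        // where the current one begins.
        while (it != regions.begin()) {
            auto prev = std::prev(it);
            if (prev->second.is_mapped || prev->first + prev->second.size != it->first) {
                break;
            }
            prev->second.size += it->second.size;
            regions.erase(it);
            it = prev;
        }

        // Merge forwards while the next region is free and starts exactly
        // where the current one ends.
        auto next = std::next(it);
        while (next != regions.end() && !next->second.is_mapped &&
               it->first + it->second.size == next->first) {
            it->second.size += next->second.size;
            next = regions.erase(next);
        }
    }

Without the adjacency check, a merged placeholder could absorb an address-space hole, which is presumably what broke the Saint's Row games.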


@@ -117,7 +117,6 @@ void MemoryManager::SetPrtArea(u32 id, VAddr address, u64 size) {
 }
 
 void MemoryManager::CopySparseMemory(VAddr virtual_addr, u8* dest, u64 size) {
-    std::shared_lock lk{mutex};
     ASSERT_MSG(IsValidMapping(virtual_addr), "Attempted to access invalid address {:#x}",
                virtual_addr);
@@ -138,7 +137,6 @@ void MemoryManager::CopySparseMemory(VAddr virtual_addr, u8* dest, u64 size) {
 bool MemoryManager::TryWriteBacking(void* address, const void* data, u64 size) {
     const VAddr virtual_addr = std::bit_cast<VAddr>(address);
-    std::shared_lock lk{mutex};
     ASSERT_MSG(IsValidMapping(virtual_addr, size), "Attempted to access invalid address {:#x}",
                virtual_addr);
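
The two std::shared_lock guards removed above suggest these helpers can be reached while the manager's mutex is already held by the caller; that rationale is my inference, not stated in the commit. std::shared_mutex is not recursive, so re-acquiring it on the same thread deadlocks. A toy illustration of the hazard, with hypothetical names:

    #include <mutex>
    #include <shared_mutex>

    std::shared_mutex mtx;

    void Helper() {
        // Blocks forever if the calling thread already holds mtx exclusively.
        std::shared_lock lk{mtx};
        // ... read shared state ...
    }

    void Caller() {
        std::scoped_lock lk{mtx}; // exclusive lock
        Helper();                 // deadlock: the shared_lock waits on the
                                  // exclusive lock this same thread holds
    }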
@@ -701,7 +699,7 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
 }
 
 s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
-    mutex.lock();
+    std::scoped_lock lk{mutex};
     ASSERT_MSG(IsValidMapping(virtual_addr, size), "Attempted to access invalid address {:#x}",
                virtual_addr);
@@ -710,7 +708,6 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
     while (it != vma_map.end() && it->second.base + it->second.size <= virtual_addr + size) {
         if (it->second.type != VMAType::PoolReserved && it->second.type != VMAType::Pooled) {
             LOG_ERROR(Kernel_Vmm, "Attempting to decommit non-pooled memory!");
-            mutex.unlock();
             return ORBIS_KERNEL_ERROR_EINVAL;
         }
         it++;
@@ -728,9 +725,7 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
         if (vma_base.type == VMAType::Pooled) {
             // We always map PoolCommitted memory to GPU, so unmap when decommitting.
             if (IsValidGpuMapping(current_addr, size_in_vma)) {
-                mutex.unlock();
                 rasterizer->UnmapMemory(current_addr, size_in_vma);
-                mutex.lock();
             }
 
             // Track how much pooled memory is decommitted
@@ -781,7 +776,6 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
     // Tracy memory tracking breaks from merging memory areas. Disabled for now.
    // TRACK_FREE(virtual_addr, "VMEM");
-    mutex.unlock();
 
     return ORBIS_OK;
 }
@@ -789,13 +783,12 @@ s32 MemoryManager::UnmapMemory(VAddr virtual_addr, u64 size) {
     if (size == 0) {
         return ORBIS_OK;
     }
-    mutex.lock();
+    std::scoped_lock lk{mutex};
     virtual_addr = Common::AlignDown(virtual_addr, 16_KB);
     size = Common::AlignUp(size, 16_KB);
     ASSERT_MSG(IsValidMapping(virtual_addr, size), "Attempted to access invalid address {:#x}",
                virtual_addr);
     u64 bytes_unmapped = UnmapMemoryImpl(virtual_addr, size);
-    mutex.unlock();
 
     return bytes_unmapped;
 }
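
PoolDecommit and UnmapMemory previously locked and unlocked the mutex by hand, so every exit path needed a matching unlock() (note the one deleted before the ORBIS_KERNEL_ERROR_EINVAL return above). std::scoped_lock releases the mutex automatically on every return path. A generic sketch of the pattern, with hypothetical functions rather than the emulator's code:

    #include <mutex>

    std::mutex mtx;

    // Before: each early return must remember to unlock first.
    int ManualLocking(bool fail) {
        mtx.lock();
        if (fail) {
            mtx.unlock(); // forgetting this line leaks the lock
            return -1;
        }
        mtx.unlock();
        return 0;
    }

    // After: the guard's destructor unlocks on every return (and on exceptions).
    int RaiiLocking(bool fail) {
        std::scoped_lock lk{mtx};
        if (fail) {
            return -1;
        }
        return 0;
    }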
@@ -873,9 +866,7 @@ u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma
         // If this mapping has GPU access, unmap from GPU.
         if (IsValidGpuMapping(virtual_addr, size)) {
-            mutex.unlock();
             rasterizer->UnmapMemory(virtual_addr, size);
-            mutex.lock();
         }
     }
 
     return size_in_vma;
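
Here, as in PoolDecommit above, the old code temporarily dropped the mutex around rasterizer->UnmapMemory and re-took it afterwards; with the RAII guard that shim is gone, so the GPU unmap now runs with the lock held. That is safe only if the rasterizer call never tries to re-acquire this mutex, which I assume holds after the shared_lock removals earlier in this commit. A sketch contrasting the two patterns, with hypothetical names:

    #include <mutex>

    std::mutex mtx;

    // Before: dropping the lock mid-function opens a window in which another
    // thread can mutate the state this function is working on.
    void FragileNotify(void (*gpu_unmap)()) {
        mtx.lock();
        // ... find the mapping under the lock ...
        mtx.unlock(); // state may change here
        gpu_unmap();
        mtx.lock();   // anything captured before unlock() may now be stale
        // ... continue ...
        mtx.unlock();
    }

    // After: hold the lock across the callback; correct as long as the
    // callback never re-enters mtx.
    void HoldLock(void (*gpu_unmap)()) {
        std::scoped_lock lk{mtx};
        // ... find the mapping ...
        gpu_unmap();
        // ... continue with consistent state ...
    }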