Kernel.Vmm: Attempt to address race conditions involving ClampRangeSize, CopySparseMemory, and TryWriteBacking (#3956)

* no

no

* Adjust locking strategy

Use a separate mutex for the initial error checks and the GPU unmap instead of taking the reader lock. All writers lock this separate mutex first; writers that don't perform GPU unmaps also take the writer lock immediately.

This gets around every race condition I've envisioned so far, and hopefully does the trick?
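
Concretely, a writer that unmaps now follows roughly this ordering (simplified sketch based on the UnmapMemory change in the diff below; assertion text shortened):

    s32 MemoryManager::UnmapMemory(VAddr virtual_addr, u64 size) {
        // Serialize against other writers first; readers are untouched.
        std::scoped_lock lk{unmap_mutex};

        // Error checks happen while readers can still take the shared lock.
        ASSERT_MSG(IsValidMapping(virtual_addr, size), "Attempted to access invalid address {:#x}",
                   virtual_addr);

        // GPU unmap happens before the writer lock is taken, so it can't deadlock
        // against readers of the VMA map.
        if (IsValidGpuMapping(virtual_addr, size)) {
            rasterizer->UnmapMemory(virtual_addr, size);
        }

        // Only now take the exclusive writer lock and mutate the VMA map.
        std::scoped_lock lk2{mutex};
        return UnmapMemoryImpl(virtual_addr, size);
    }

Writers that never touch the GPU mapping (PoolCommit, Protect, SetDirectMemoryType, NameVirtualRange) just take both locks up front with std::scoped_lock lk{mutex, unmap_mutex}.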

* Clang

* Always GPU unmap

GPU unmaps have logic built in to only run on mapped areas.
Not sure if userfaultfd would work with this, but since that's already broken anyway, I'll let reviewers decide that.

Without doing this, I'd need an extra pass through the VMAs to work out which ranges need GPU changes before I can unmap from the GPU, and only then perform the remaining unmap work. Especially for places like MapMemory, that's a lot of code bloat.
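
The guard every call site uses already makes the unconditional unmap cheap when nothing is mapped; this is the pattern repeated throughout the diff below:

    // Skipped entirely if the range was never given GPU access.
    if (IsValidGpuMapping(virtual_addr, size)) {
        rasterizer->UnmapMemory(virtual_addr, size);
    }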

* Fixups

* Update memory.cpp

* Rename mutex

It's really just a mutex for the sole purpose of dealing with GPU unmaps, so unmap_mutex is a bit more fitting than transition_mutex.
Stephen Miller
2026-01-27 04:25:23 -06:00
committed by GitHub
parent 514e363472
commit 4ba0e62670
5 changed files with 145 additions and 98 deletions

View File

@@ -17,6 +17,15 @@ public:
         writer_active = true;
     }
 
+    bool try_lock() {
+        std::lock_guard<std::mutex> lock(mtx);
+        if (writer_active || readers > 0) {
+            return false;
+        }
+        writer_active = true;
+        return true;
+    }
+
     void unlock() {
         std::lock_guard<std::mutex> lock(mtx);
         writer_active = false;
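
The try_lock addition appears to be what lets SharedFirstMutex participate in a multi-mutex std::scoped_lock: locking several mutexes at once goes through std::lock, which requires each of them to satisfy the Lockable requirement (lock, try_lock, unlock). That is the form the non-unmapping writers in memory.cpp now use:

    // Deadlock-avoiding acquisition of the shared-first writer lock and the new unmap mutex.
    std::scoped_lock lk{mutex, unmap_mutex};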

View File

@@ -709,7 +709,7 @@ struct AddressSpace::Impl {
         return ret;
     }
 
-    void Unmap(VAddr virtual_addr, u64 size, bool) {
+    void Unmap(VAddr virtual_addr, u64 size) {
         // Check to see if we are adjacent to any regions.
         VAddr start_address = virtual_addr;
         VAddr end_address = start_address + size;
@@ -792,12 +792,8 @@ void* AddressSpace::MapFile(VAddr virtual_addr, u64 size, u64 offset, u32 prot,
 #endif
 }
 
-void AddressSpace::Unmap(VAddr virtual_addr, u64 size, bool has_backing) {
-#ifdef _WIN32
+void AddressSpace::Unmap(VAddr virtual_addr, u64 size) {
     impl->Unmap(virtual_addr, size);
-#else
-    impl->Unmap(virtual_addr, size, has_backing);
-#endif
 }
 
 void AddressSpace::Protect(VAddr virtual_addr, u64 size, MemoryPermission perms) {

View File

@@ -79,8 +79,9 @@ public:
     void* MapFile(VAddr virtual_addr, u64 size, u64 offset, u32 prot, uintptr_t fd);
 
     /// Unmaps specified virtual memory area.
-    void Unmap(VAddr virtual_addr, u64 size, bool has_backing);
+    void Unmap(VAddr virtual_addr, u64 size);
 
+    /// Protects requested region.
     void Protect(VAddr virtual_addr, u64 size, MemoryPermission perms);
 
     // Returns an interval set containing all usable regions.

View File

@@ -79,6 +79,7 @@ u64 MemoryManager::ClampRangeSize(VAddr virtual_addr, u64 size) {
         return size;
     }
 
+    std::shared_lock lk{mutex};
     ASSERT_MSG(IsValidMapping(virtual_addr), "Attempted to access invalid address {:#x}",
                virtual_addr);
@@ -117,6 +118,7 @@ void MemoryManager::SetPrtArea(u32 id, VAddr address, u64 size) {
 }
 
 void MemoryManager::CopySparseMemory(VAddr virtual_addr, u8* dest, u64 size) {
+    std::shared_lock lk{mutex};
     ASSERT_MSG(IsValidMapping(virtual_addr), "Attempted to access invalid address {:#x}",
                virtual_addr);
@@ -137,6 +139,7 @@ void MemoryManager::CopySparseMemory(VAddr virtual_addr, u8* dest, u64 size) {
 
 bool MemoryManager::TryWriteBacking(void* address, const void* data, u64 size) {
     const VAddr virtual_addr = std::bit_cast<VAddr>(address);
+    std::shared_lock lk{mutex};
     ASSERT_MSG(IsValidMapping(virtual_addr, size), "Attempted to access invalid address {:#x}",
                virtual_addr);
@@ -263,9 +266,7 @@ s32 MemoryManager::Free(PAddr phys_addr, u64 size, bool is_checked) {
         return ORBIS_OK;
     }
 
-    // Lock mutex
-    std::scoped_lock lk{mutex};
-
+    std::scoped_lock lk{unmap_mutex};
     // If this is a checked free, then all direct memory in range must be allocated.
     std::vector<std::pair<PAddr, u64>> free_list;
     u64 remaining_size = size;
@@ -316,6 +317,17 @@ s32 MemoryManager::Free(PAddr phys_addr, u64 size, bool is_checked) {
             }
         }
     }
 
+    // Early unmap from GPU to avoid deadlocking.
+    for (auto& [addr, unmap_size] : remove_list) {
+        if (IsValidGpuMapping(addr, unmap_size)) {
+            rasterizer->UnmapMemory(addr, unmap_size);
+        }
+    }
+
+    // Acquire writer lock
+    std::scoped_lock lk2{mutex};
+
     for (const auto& [addr, size] : remove_list) {
         LOG_INFO(Kernel_Vmm, "Unmapping direct mapping {:#x} with size {:#x}", addr, size);
         UnmapMemoryImpl(addr, size);
@@ -337,7 +349,7 @@ s32 MemoryManager::Free(PAddr phys_addr, u64 size, bool is_checked) {
 }
 
 s32 MemoryManager::PoolCommit(VAddr virtual_addr, u64 size, MemoryProt prot, s32 mtype) {
-    std::scoped_lock lk{mutex};
+    std::scoped_lock lk{mutex, unmap_mutex};
 
     ASSERT_MSG(IsValidMapping(virtual_addr, size), "Attempted to access invalid address {:#x}",
                virtual_addr);
@@ -429,54 +441,31 @@ s32 MemoryManager::PoolCommit(VAddr virtual_addr, u64 size, MemoryProt prot, s32
     return ORBIS_OK;
 }
 
-std::pair<s32, MemoryManager::VMAHandle> MemoryManager::CreateArea(
-    VAddr virtual_addr, u64 size, MemoryProt prot, MemoryMapFlags flags, VMAType type,
-    std::string_view name, u64 alignment) {
-    // Limit the minimum address to SystemManagedVirtualBase to prevent hardware-specific issues.
-    VAddr mapped_addr = (virtual_addr == 0) ? impl.SystemManagedVirtualBase() : virtual_addr;
-
-    // If fixed is specified, map directly to the region of virtual_addr + size.
-    // Fixed mapping means the virtual address must exactly match the provided one.
-    // On a PS4, the Fixed flag is ignored if address 0 is provided.
-    if (True(flags & MemoryMapFlags::Fixed) && virtual_addr != 0) {
-        ASSERT_MSG(IsValidMapping(mapped_addr, size), "Attempted to access invalid address {:#x}",
-                   mapped_addr);
-        auto vma = FindVMA(mapped_addr)->second;
-        // There's a possible edge case where we're mapping to a partially reserved range.
-        // To account for this, unmap any reserved areas within this mapping range first.
-        auto unmap_addr = mapped_addr;
-        auto unmap_size = size;
-
-        // If flag NoOverwrite is provided, don't overwrite mapped VMAs.
-        // When it isn't provided, VMAs can be overwritten regardless of if they're mapped.
-        while ((False(flags & MemoryMapFlags::NoOverwrite) || vma.IsFree()) &&
-               unmap_addr < mapped_addr + size) {
+MemoryManager::VMAHandle MemoryManager::CreateArea(VAddr virtual_addr, u64 size, MemoryProt prot,
+                                                   MemoryMapFlags flags, VMAType type,
+                                                   std::string_view name, u64 alignment) {
+    // Locate the VMA representing the requested region
+    auto vma = FindVMA(virtual_addr)->second;
+    if (True(flags & MemoryMapFlags::Fixed)) {
+        // Callers should check to ensure the NoOverwrite flag is handled appropriately beforehand.
+        auto unmap_addr = virtual_addr;
+        auto unmap_size = size;
+        while (unmap_size > 0) {
             auto unmapped = UnmapBytesFromEntry(unmap_addr, vma, unmap_size);
             unmap_addr += unmapped;
             unmap_size -= unmapped;
             vma = FindVMA(unmap_addr)->second;
         }
-
-        vma = FindVMA(mapped_addr)->second;
-        auto remaining_size = vma.base + vma.size - mapped_addr;
-        if (!vma.IsFree() || remaining_size < size) {
-            LOG_ERROR(Kernel_Vmm, "Unable to map {:#x} bytes at address {:#x}", size, mapped_addr);
-            return {ORBIS_KERNEL_ERROR_ENOMEM, vma_map.end()};
-        }
-    } else {
-        // When MemoryMapFlags::Fixed is not specified, and mapped_addr is 0,
-        // search from address 0x200000000 instead.
-        alignment = alignment > 0 ? alignment : 16_KB;
-        mapped_addr = virtual_addr == 0 ? 0x200000000 : mapped_addr;
-        mapped_addr = SearchFree(mapped_addr, size, alignment);
-        if (mapped_addr == -1) {
-            // No suitable memory areas to map to
-            return {ORBIS_KERNEL_ERROR_ENOMEM, vma_map.end()};
-        }
+        vma = FindVMA(virtual_addr)->second;
     }
 
+    // By this point, vma should be free and ready to map.
+    // Caller performs address searches for non-fixed mappings before this.
+    ASSERT_MSG(vma.IsFree(), "VMA to map is not free");
+
     // Create a memory area representing this mapping.
-    const auto new_vma_handle = CarveVMA(mapped_addr, size);
+    const auto new_vma_handle = CarveVMA(virtual_addr, size);
     auto& new_vma = new_vma_handle->second;
     const bool is_exec = True(prot & MemoryProt::CpuExec);
     if (True(prot & MemoryProt::CpuWrite)) {
@@ -484,12 +473,13 @@ std::pair<s32, MemoryManager::VMAHandle> MemoryManager::CreateArea(
         prot |= MemoryProt::CpuRead;
     }
 
+    // Update VMA appropriately.
     new_vma.disallow_merge = True(flags & MemoryMapFlags::NoCoalesce);
     new_vma.prot = prot;
     new_vma.name = name;
     new_vma.type = type;
    new_vma.phys_areas.clear();
 
-    return {ORBIS_OK, new_vma_handle};
+    return new_vma_handle;
 }
 
 s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, MemoryProt prot,
@@ -504,8 +494,7 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
                   total_flexible_size - flexible_usage, size);
         return ORBIS_KERNEL_ERROR_EINVAL;
     }
 
-    std::scoped_lock lk{mutex};
-
+    std::scoped_lock lk{unmap_mutex};
     PhysHandle dmem_area;
     // Validate the requested physical address range
@@ -538,12 +527,37 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
         }
     }
 
-    auto [result, new_vma_handle] =
-        CreateArea(virtual_addr, size, prot, flags, type, name, alignment);
-    if (result != ORBIS_OK) {
-        return result;
+    if (True(flags & MemoryMapFlags::Fixed) && True(flags & MemoryMapFlags::NoOverwrite)) {
+        // Perform necessary error checking for Fixed & NoOverwrite case
+        ASSERT_MSG(IsValidMapping(virtual_addr, size), "Attempted to access invalid address {:#x}",
+                   virtual_addr);
+        auto vma = FindVMA(virtual_addr)->second;
+        auto remaining_size = vma.base + vma.size - virtual_addr;
+        if (!vma.IsFree() || remaining_size < size) {
+            LOG_ERROR(Kernel_Vmm, "Unable to map {:#x} bytes at address {:#x}", size, virtual_addr);
+            return ORBIS_KERNEL_ERROR_ENOMEM;
+        }
+    } else if (False(flags & MemoryMapFlags::Fixed)) {
+        // Find a free virtual addr to map
+        alignment = alignment > 0 ? alignment : 16_KB;
+        virtual_addr = virtual_addr == 0 ? DEFAULT_MAPPING_BASE : virtual_addr;
+        virtual_addr = SearchFree(virtual_addr, size, alignment);
+        if (virtual_addr == -1) {
+            // No suitable memory areas to map to
+            return ORBIS_KERNEL_ERROR_ENOMEM;
+        }
     }
 
+    // Perform early GPU unmap to avoid potential deadlocks
+    if (IsValidGpuMapping(virtual_addr, size)) {
+        rasterizer->UnmapMemory(virtual_addr, size);
+    }
+
+    // Acquire writer lock.
+    std::scoped_lock lk2{mutex};
+
+    // Create VMA representing this mapping.
+    auto new_vma_handle = CreateArea(virtual_addr, size, prot, flags, type, name, alignment);
     auto& new_vma = new_vma_handle->second;
     auto mapped_addr = new_vma.base;
     bool is_exec = True(prot & MemoryProt::CpuExec);
@@ -590,7 +604,7 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
         // Map the physical memory for this direct memory mapping.
         auto phys_addr_to_search = phys_addr;
         u64 remaining_size = size;
-        dmem_area = FindDmemArea(phys_addr);
+        auto dmem_area = FindDmemArea(phys_addr);
         while (dmem_area != dmem_map.end() && remaining_size > 0) {
             // Carve a new dmem area in place of this one with the appropriate type.
             // Ensure the carved area only covers the current dmem area.
@@ -638,14 +652,15 @@ s32 MemoryManager::MapMemory(void** out_addr, VAddr virtual_addr, u64 size, Memo
             rasterizer->MapMemory(mapped_addr, size);
         }
     }
 
     return ORBIS_OK;
 }
 
 s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, MemoryProt prot,
                            MemoryMapFlags flags, s32 fd, s64 phys_addr) {
-    std::scoped_lock lk{mutex};
+    uintptr_t handle = 0;
+    std::scoped_lock lk{unmap_mutex};
     // Get the file to map
     auto* h = Common::Singleton<Core::FileSys::HandleTable>::Instance();
     auto file = h->GetFile(fd);
     if (file == nullptr) {
@@ -663,12 +678,13 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
         prot |= MemoryProt::CpuRead;
     }
 
-    const auto handle = file->f.GetFileMapping();
+    handle = file->f.GetFileMapping();
 
     if (False(file->f.GetAccessMode() & Common::FS::FileAccessMode::Write) ||
         False(file->f.GetAccessMode() & Common::FS::FileAccessMode::Append)) {
-        // If the file does not have write access, ensure prot does not contain write permissions.
-        // On real hardware, these mappings succeed, but the memory cannot be written to.
+        // If the file does not have write access, ensure prot does not contain write
+        // permissions. On real hardware, these mappings succeed, but the memory cannot be
+        // written to.
         prot &= ~MemoryProt::CpuWrite;
     }
@@ -682,13 +698,38 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
         prot &= ~MemoryProt::CpuExec;
     }
 
-    auto [result, new_vma_handle] =
-        CreateArea(virtual_addr, size, prot, flags, VMAType::File, "anon", 0);
-    if (result != ORBIS_OK) {
-        return result;
+    if (True(flags & MemoryMapFlags::Fixed) && False(flags & MemoryMapFlags::NoOverwrite)) {
+        ASSERT_MSG(IsValidMapping(virtual_addr, size), "Attempted to access invalid address {:#x}",
+                   virtual_addr);
+        auto vma = FindVMA(virtual_addr)->second;
+        auto remaining_size = vma.base + vma.size - virtual_addr;
+        if (!vma.IsFree() || remaining_size < size) {
+            LOG_ERROR(Kernel_Vmm, "Unable to map {:#x} bytes at address {:#x}", size, virtual_addr);
+            return ORBIS_KERNEL_ERROR_ENOMEM;
+        }
+    } else if (False(flags & MemoryMapFlags::Fixed)) {
+        virtual_addr = virtual_addr == 0 ? DEFAULT_MAPPING_BASE : virtual_addr;
+        virtual_addr = SearchFree(virtual_addr, size, 16_KB);
+        if (virtual_addr == -1) {
+            // No suitable memory areas to map to
+            return ORBIS_KERNEL_ERROR_ENOMEM;
+        }
     }
 
+    // Perform early GPU unmap to avoid potential deadlocks
+    if (IsValidGpuMapping(virtual_addr, size)) {
+        rasterizer->UnmapMemory(virtual_addr, size);
+    }
+
+    // Aquire writer lock
+    std::scoped_lock lk2{mutex};
+
+    // Update VMA map and map to address space.
+    auto new_vma_handle = CreateArea(virtual_addr, size, prot, flags, VMAType::File, "anon", 0);
     auto& new_vma = new_vma_handle->second;
+    new_vma.fd = fd;
     auto mapped_addr = new_vma.base;
     bool is_exec = True(prot & MemoryProt::CpuExec);
@@ -699,7 +740,7 @@ s32 MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, u64 size, Memory
 }
 
 s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
-    std::scoped_lock lk{mutex};
+    std::scoped_lock lk{unmap_mutex};
 
     ASSERT_MSG(IsValidMapping(virtual_addr, size), "Attempted to access invalid address {:#x}",
                virtual_addr);
@@ -713,6 +754,14 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
         it++;
     }
 
+    // Perform early GPU unmap to avoid potential deadlocks
+    if (IsValidGpuMapping(virtual_addr, size)) {
+        rasterizer->UnmapMemory(virtual_addr, size);
+    }
+
+    // Aquire writer mutex
+    std::scoped_lock lk2{mutex};
+
     // Loop through all vmas in the area, unmap them.
     u64 remaining_size = size;
     VAddr current_addr = virtual_addr;
@@ -721,13 +770,7 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
         const auto& vma_base = handle->second;
         const auto start_in_vma = current_addr - vma_base.base;
         const auto size_in_vma = std::min<u64>(remaining_size, vma_base.size - start_in_vma);
 
         if (vma_base.type == VMAType::Pooled) {
-            // We always map PoolCommitted memory to GPU, so unmap when decomitting.
-            if (IsValidGpuMapping(current_addr, size_in_vma)) {
-                rasterizer->UnmapMemory(current_addr, size_in_vma);
-            }
-
             // Track how much pooled memory is decommitted
             pool_budget += size_in_vma;
@@ -772,7 +815,7 @@ s32 MemoryManager::PoolDecommit(VAddr virtual_addr, u64 size) {
     }
 
     // Unmap from address space
-    impl.Unmap(virtual_addr, size, true);
+    impl.Unmap(virtual_addr, size);
 
     // Tracy memory tracking breaks from merging memory areas. Disabled for now.
     // TRACK_FREE(virtual_addr, "VMEM");
@@ -783,29 +826,32 @@ s32 MemoryManager::UnmapMemory(VAddr virtual_addr, u64 size) {
     if (size == 0) {
         return ORBIS_OK;
     }
 
-    std::scoped_lock lk{mutex};
+    std::scoped_lock lk{unmap_mutex};
 
+    // Align address and size appropriately
     virtual_addr = Common::AlignDown(virtual_addr, 16_KB);
     size = Common::AlignUp(size, 16_KB);
     ASSERT_MSG(IsValidMapping(virtual_addr, size), "Attempted to access invalid address {:#x}",
                virtual_addr);
 
-    u64 bytes_unmapped = UnmapMemoryImpl(virtual_addr, size);
-    return bytes_unmapped;
+    // If the requested range has GPU access, unmap from GPU.
+    if (IsValidGpuMapping(virtual_addr, size)) {
+        rasterizer->UnmapMemory(virtual_addr, size);
+    }
+
+    // Acquire writer lock.
+    std::scoped_lock lk2{mutex};
+    return UnmapMemoryImpl(virtual_addr, size);
 }
 
 u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma_base, u64 size) {
     const auto start_in_vma = virtual_addr - vma_base.base;
     const auto size_in_vma = std::min<u64>(vma_base.size - start_in_vma, size);
     const auto vma_type = vma_base.type;
-    const bool has_backing = HasPhysicalBacking(vma_base) || vma_base.type == VMAType::File;
-    const bool readonly_file =
-        vma_base.prot == MemoryProt::CpuRead && vma_base.type == VMAType::File;
-    const bool is_exec = True(vma_base.prot & MemoryProt::CpuExec);
     if (vma_base.type == VMAType::Free || vma_base.type == VMAType::Pooled) {
         return size_in_vma;
     }
 
-    PAddr phys_base = 0;
     VAddr current_addr = virtual_addr;
     if (vma_base.phys_areas.size() > 0) {
         u64 size_to_free = size_in_vma;
@@ -860,14 +906,9 @@ u64 MemoryManager::UnmapBytesFromEntry(VAddr virtual_addr, VirtualMemoryArea vma
     if (vma_type != VMAType::Reserved && vma_type != VMAType::PoolReserved) {
         // Unmap the memory region.
-        impl.Unmap(virtual_addr, size_in_vma, has_backing);
+        impl.Unmap(virtual_addr, size_in_vma);
 
         // Tracy memory tracking breaks from merging memory areas. Disabled for now.
         // TRACK_FREE(virtual_addr, "VMEM");
-
-        // If this mapping has GPU access, unmap from GPU.
-        if (IsValidGpuMapping(virtual_addr, size)) {
-            rasterizer->UnmapMemory(virtual_addr, size);
-        }
     }
     return size_in_vma;
 }
@@ -983,7 +1024,7 @@ s32 MemoryManager::Protect(VAddr addr, u64 size, MemoryProt prot) {
     }
 
     // Ensure the range to modify is valid
-    std::scoped_lock lk{mutex};
+    std::scoped_lock lk{mutex, unmap_mutex};
     ASSERT_MSG(IsValidMapping(addr, size), "Attempted to access invalid address {:#x}", addr);
 
     // Appropriately restrict flags.
@@ -1141,7 +1182,7 @@ s32 MemoryManager::DirectQueryAvailable(PAddr search_start, PAddr search_end, u6
 }
 
 s32 MemoryManager::SetDirectMemoryType(VAddr addr, u64 size, s32 memory_type) {
-    std::scoped_lock lk{mutex};
+    std::scoped_lock lk{mutex, unmap_mutex};
 
     ASSERT_MSG(IsValidMapping(addr, size), "Attempted to access invalid address {:#x}", addr);
@@ -1188,7 +1229,7 @@ s32 MemoryManager::SetDirectMemoryType(VAddr addr, u64 size, s32 memory_type) {
 }
 
 void MemoryManager::NameVirtualRange(VAddr virtual_addr, u64 size, std::string_view name) {
-    std::scoped_lock lk{mutex};
+    std::scoped_lock lk{mutex, unmap_mutex};
 
     // Sizes are aligned up to the nearest 16_KB
     u64 aligned_size = Common::AlignUp(size, 16_KB);
@@ -1246,7 +1287,6 @@ s32 MemoryManager::IsStack(VAddr addr, void** start, void** end) {
     ASSERT_MSG(IsValidMapping(addr), "Attempted to access invalid address {:#x}", addr);
 
     const auto& vma = FindVMA(addr)->second;
     if (vma.IsFree()) {
-        mutex.unlock_shared();
         return ORBIS_KERNEL_ERROR_EACCES;
     }

View File

@@ -28,6 +28,8 @@ class MemoryMapViewer;
 
 namespace Core {
 
+constexpr u64 DEFAULT_MAPPING_BASE = 0x200000000;
+
 enum class MemoryProt : u32 {
     NoAccess = 0,
     CpuRead = 1,
@@ -304,10 +306,8 @@
                vma.type == VMAType::Pooled;
     }
 
-    std::pair<s32, MemoryManager::VMAHandle> CreateArea(VAddr virtual_addr, u64 size,
-                                                        MemoryProt prot, MemoryMapFlags flags,
-                                                        VMAType type, std::string_view name,
-                                                        u64 alignment);
+    VMAHandle CreateArea(VAddr virtual_addr, u64 size, MemoryProt prot, MemoryMapFlags flags,
+                         VMAType type, std::string_view name, u64 alignment);
 
     VAddr SearchFree(VAddr virtual_addr, u64 size, u32 alignment);
@@ -333,6 +333,7 @@ private:
     PhysMap fmem_map;
     VMAMap vma_map;
     Common::SharedFirstMutex mutex{};
+    std::mutex unmap_mutex{};
    u64 total_direct_size{};
     u64 total_flexible_size{};
     u64 flexible_usage{};