windows: Address a bunch of address space problems

IndecisiveTurtle 2024-11-09 15:29:09 +02:00
parent caadcde4ab
commit 0d2373177e
9 changed files with 163 additions and 96 deletions

View File

@@ -40,6 +40,12 @@ static constexpr size_t BackingSize = SCE_KERNEL_MAIN_DMEM_SIZE_PRO;
}
}
struct MemoryRegion {
VAddr base;
size_t size;
bool is_mapped;
};
struct AddressSpace::Impl {
Impl() : process{GetCurrentProcess()} {
// Allocate virtual address placeholder for our address space.
@@ -75,6 +81,7 @@ struct AddressSpace::Impl {
Common::GetLastErrorMsg());
// Take the reduction off of the system managed area, and leave the others unchanged.
reduction = size_t(virtual_base - SYSTEM_MANAGED_MIN);
system_managed_base = virtual_base;
system_managed_size = SystemManagedSize - reduction;
system_reserved_base = reinterpret_cast<u8*>(SYSTEM_RESERVED_MIN);
@@ -95,7 +102,8 @@ struct AddressSpace::Impl {
const uintptr_t system_managed_addr = reinterpret_cast<uintptr_t>(system_managed_base);
const uintptr_t system_reserved_addr = reinterpret_cast<uintptr_t>(system_reserved_base);
const uintptr_t user_addr = reinterpret_cast<uintptr_t>(user_base);
placeholders.insert({system_managed_addr, virtual_size - reduction});
regions.emplace(system_managed_addr,
MemoryRegion{system_managed_addr, virtual_size - reduction, false});
// Allocate backing file that represents the total physical memory.
backing_handle =
@@ -132,42 +140,15 @@ struct AddressSpace::Impl {
}
void* Map(VAddr virtual_addr, PAddr phys_addr, size_t size, ULONG prot, uintptr_t fd = 0) {
const size_t aligned_size = Common::AlignUp(size, 16_KB);
const auto it = placeholders.find(virtual_addr);
ASSERT_MSG(it != placeholders.end(), "Cannot map already mapped region");
ASSERT_MSG(virtual_addr >= it->lower() && virtual_addr + aligned_size <= it->upper(),
"Map range must be fully contained in a placeholder");
// Windows only allows splitting a placeholder into two.
// This means that if the map range is fully
// contained in the placeholder, we need to perform two split operations,
// one at the start and at the end.
const VAddr placeholder_start = it->lower();
const VAddr placeholder_end = it->upper();
const VAddr virtual_end = virtual_addr + aligned_size;
// If the placeholder doesn't exactly start at virtual_addr, split it at the start.
if (placeholder_start != virtual_addr) {
VirtualFreeEx(process, reinterpret_cast<LPVOID>(placeholder_start),
virtual_addr - placeholder_start, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}
// If the placeholder doesn't exactly end at virtual_end, split it at the end.
if (placeholder_end != virtual_end) {
VirtualFreeEx(process, reinterpret_cast<LPVOID>(virtual_end),
placeholder_end - virtual_end, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
}
// Remove the placeholder.
placeholders.erase({virtual_addr, virtual_end});
// Perform the map.
// Before mapping we must carve a placeholder with the exact properties of our mapping.
auto* region = EnsureSplitRegionForMapping(virtual_addr, size);
region->is_mapped = true;
void* ptr = nullptr;
if (phys_addr != -1) {
HANDLE backing = fd ? reinterpret_cast<HANDLE>(fd) : backing_handle;
if (fd && prot == PAGE_READONLY) {
DWORD resultvar;
ptr = VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), aligned_size,
ptr = VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), size,
MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER,
PAGE_READWRITE, nullptr, 0);
bool ret = ReadFile(backing, ptr, size, &resultvar, NULL);
@@ -176,12 +157,12 @@ struct AddressSpace::Impl {
ASSERT_MSG(ret, "VirtualProtect failed. {}", Common::GetLastErrorMsg());
} else {
ptr = MapViewOfFile3(backing, process, reinterpret_cast<PVOID>(virtual_addr),
phys_addr, aligned_size, MEM_REPLACE_PLACEHOLDER, prot,
phys_addr, size, MEM_REPLACE_PLACEHOLDER, prot,
nullptr, 0);
}
} else {
ptr =
VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), aligned_size,
VirtualAlloc2(process, reinterpret_cast<PVOID>(virtual_addr), size,
MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER, prot, nullptr, 0);
}
ASSERT_MSG(ptr, "{}", Common::GetLastErrorMsg());
@@ -202,33 +183,114 @@ struct AddressSpace::Impl {
// The unmap call will create a new placeholder region. We need to see if we can coalesce it
// with neighbors.
VAddr placeholder_start = virtual_addr;
VAddr placeholder_end = virtual_addr + size;
JoinRegionsAfterUnmap(virtual_addr, size);
}
// The following code is inspired by Dolphin's MemArena
// https://github.com/dolphin-emu/dolphin/blob/deee3ee4/Source/Core/Common/MemArenaWin.cpp#L212
MemoryRegion* EnsureSplitRegionForMapping(VAddr address, size_t size) {
// Find closest region that is <= the given address by using upper bound and decrementing
auto it = regions.upper_bound(address);
ASSERT_MSG(it != regions.begin(), "Invalid address {:#x}", address);
--it;
ASSERT_MSG(!it->second.is_mapped,
"Attempt to map {:#x} with size {:#x} which overlaps with {:#x} mapping",
address, size, it->second.base);
auto& [base, region] = *it;
const VAddr mapping_address = region.base;
const size_t region_size = region.size;
if (mapping_address == address) {
// If this region is already split up correctly we don't have to do anything
if (region_size == size) {
return &region;
}
ASSERT_MSG(region_size >= size,
"Region with address {:#x} and size {:#x} can't fit {:#x}", mapping_address,
region_size, size);
// Split the placeholder.
if (!VirtualFreeEx(process, LPVOID(address), size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
UNREACHABLE_MSG("Region splitting failed: {}", Common::GetLastErrorMsg());
return nullptr;
}
// Update tracked mappings and return the first of the two
region.size = size;
const VAddr new_mapping_start = address + size;
regions.emplace_hint(std::next(it), new_mapping_start,
MemoryRegion(new_mapping_start, region_size - size, false));
return &region;
}
ASSERT(mapping_address < address);
// Is there enough space to map this?
const size_t offset_in_region = address - mapping_address;
const size_t minimum_size = size + offset_in_region;
ASSERT(region_size >= minimum_size);
// Split the placeholder.
if (!VirtualFreeEx(process, LPVOID(address), size, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER)) {
UNREACHABLE_MSG("Region splitting failed: {}", Common::GetLastErrorMsg());
return nullptr;
}
// Do we now have two regions or three regions?
if (region_size == minimum_size) {
// Split into two; update tracked mappings and return the second one
region.size = offset_in_region;
it = regions.emplace_hint(std::next(it), address,
MemoryRegion(address, size, false));
return &it->second;
} else {
// Split into three; update tracked mappings and return the middle one
region.size = offset_in_region;
const VAddr middle_mapping_start = address;
const size_t middle_mapping_size = size;
const VAddr after_mapping_start = address + size;
const size_t after_mapping_size = region_size - minimum_size;
it = regions.emplace_hint(std::next(it),
after_mapping_start, MemoryRegion(after_mapping_start, after_mapping_size, false));
it = regions.emplace_hint(it, middle_mapping_start,
MemoryRegion(middle_mapping_start, middle_mapping_size, false));
return &it->second;
}
}
void JoinRegionsAfterUnmap(VAddr address, size_t size) {
// There should be a mapping that matches the request exactly, find it
auto it = regions.find(address);
ASSERT_MSG(it != regions.end() && it->second.size == size,
"Invalid address/size given to unmap.");
auto& [base, region] = *it;
region.is_mapped = false;
// Check if a placeholder exists right before us.
const auto left_it = placeholders.find(virtual_addr - 1);
if (left_it != placeholders.end()) {
ASSERT_MSG(left_it->upper() == virtual_addr,
"Left placeholder does not end at virtual_addr!");
placeholder_start = left_it->lower();
VirtualFreeEx(process, reinterpret_cast<LPVOID>(placeholder_start),
placeholder_end - placeholder_start,
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
auto it_prev = it != regions.begin() ? std::prev(it) : regions.end();
if (it_prev != regions.end() && !it_prev->second.is_mapped) {
const size_t total_size = it_prev->second.size + size;
if (!VirtualFreeEx(process, LPVOID(it_prev->first), total_size,
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
UNREACHABLE_MSG("Region coalescing failed: {}", Common::GetLastErrorMsg());
}
it_prev->second.size = total_size;
it = regions.erase(it);
}
// Check if a placeholder exists right after us.
const auto right_it = placeholders.find(placeholder_end + 1);
if (right_it != placeholders.end()) {
ASSERT_MSG(right_it->lower() == placeholder_end,
"Right placeholder does not start at virtual_end!");
placeholder_end = right_it->upper();
VirtualFreeEx(process, reinterpret_cast<LPVOID>(placeholder_start),
placeholder_end - placeholder_start,
MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
}
auto it_next = std::next(it);
if (it_next != regions.end() && !it_next->second.is_mapped) {
const size_t total_size = size + it_next->second.size;
if (!VirtualFreeEx(process, LPVOID(it->first), total_size, MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS)) {
UNREACHABLE_MSG("Region coalescing failed: {}", Common::GetLastErrorMsg());
}
// Insert the new placeholder.
placeholders.insert({placeholder_start, placeholder_end});
it->second.size = total_size;
regions.erase(it_next);
}
}
void Protect(VAddr virtual_addr, size_t size, bool read, bool write, bool execute) {
@@ -251,18 +313,21 @@ struct AddressSpace::Impl {
return;
}
DWORD old_flags{};
bool success =
VirtualProtect(reinterpret_cast<void*>(virtual_addr), size, new_flags, &old_flags);
if (!success) {
LOG_ERROR(Common_Memory,
"Failed to change virtual memory protection for address {:#x}, size {}",
virtual_addr, size);
const VAddr virtual_end = virtual_addr + size;
auto it = --regions.upper_bound(virtual_addr);
for (; it->first < virtual_end; it++) {
if (!it->second.is_mapped) {
continue;
}
const auto& region = it->second;
const size_t range_addr = std::max(region.base, virtual_addr);
const size_t range_size = std::min(region.base + region.size, virtual_end) - range_addr;
DWORD old_flags{};
if (!VirtualProtectEx(process, LPVOID(range_addr), range_size, new_flags, &old_flags)) {
UNREACHABLE_MSG("Failed to change virtual memory protection for address {:#x}, size {}",
range_addr, range_size);
}
}
// Use assert to ensure success in debug builds
DEBUG_ASSERT(success && "Failed to change virtual memory protection");
}
HANDLE process{};
@@ -275,7 +340,7 @@ struct AddressSpace::Impl {
size_t system_reserved_size{};
u8* user_base{};
size_t user_size{};
boost::icl::separate_interval_set<uintptr_t> placeholders;
std::map<VAddr, MemoryRegion> regions;
};
#else
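
The scheme above leans on the Win32 placeholder API. Below is a minimal standalone sketch, not part of this commit, of the reserve/split/replace/coalesce lifecycle that Impl::Map, EnsureSplitRegionForMapping, and JoinRegionsAfterUnmap drive. It assumes Windows 10 1803 or newer and linking against onecore.lib (mincore.lib on older SDKs); error handling is reduced to asserts.

#include <cassert>
#include <windows.h>

int main() {
    const HANDLE process = GetCurrentProcess();
    constexpr SIZE_T kGranule = 64 * 1024;
    constexpr SIZE_T kTotal = 4 * kGranule;

    // 1. Reserve one large placeholder covering the whole range
    //    (MEM_RESERVE_PLACEHOLDER requires PAGE_NOACCESS).
    char* base = static_cast<char*>(
        VirtualAlloc2(process, nullptr, kTotal, MEM_RESERVE | MEM_RESERVE_PLACEHOLDER,
                      PAGE_NOACCESS, nullptr, 0));
    assert(base);

    // 2. Carve the middle of the placeholder into its own placeholder, the way
    //    EnsureSplitRegionForMapping does before every mapping.
    char* const map_at = base + kGranule;
    constexpr SIZE_T kMapSize = 2 * kGranule;
    BOOL ok = VirtualFreeEx(process, map_at, kMapSize,
                            MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
    assert(ok); // three placeholders now: before / carved / after

    // 3. Replace the carved placeholder with a committed allocation.
    //    MapViewOfFile3 with MEM_REPLACE_PLACEHOLDER works the same way.
    void* ptr = VirtualAlloc2(process, map_at, kMapSize,
                              MEM_RESERVE | MEM_COMMIT | MEM_REPLACE_PLACEHOLDER,
                              PAGE_READWRITE, nullptr, 0);
    assert(ptr == map_at);

    // 4. Turn the mapping back into a placeholder, then merge all three
    //    neighbors into one, as JoinRegionsAfterUnmap does on unmap.
    ok = VirtualFreeEx(process, map_at, kMapSize,
                       MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
    assert(ok);
    ok = VirtualFreeEx(process, base, kTotal,
                       MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS);
    assert(ok);

    VirtualFreeEx(process, base, 0, MEM_RELEASE); // release the lone placeholder
    return 0;
}

Tracking every split in a std::map keyed by base address is also what lets the reworked Protect clamp its VirtualProtectEx calls to mapped sub-ranges instead of assuming one contiguous region.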

View File

@@ -5,6 +5,7 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/scope_exit.h"
#include "common/logging/log.h"
#include "common/singleton.h"
#include "core/file_sys/fs.h"
@@ -145,11 +146,6 @@ s32 PS4_SYSV_ABI sceKernelReserveVirtualRange(void** addr, u64 len, int flags, u
int PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, int prot, int flags,
s64 directMemoryStart, u64 alignment,
const char* name) {
LOG_INFO(Kernel_Vmm,
"addr = {}, len = {:#x}, prot = {:#x}, flags = {:#x}, directMemoryStart = {:#x}, "
"alignment = {:#x}",
fmt::ptr(*addr), len, prot, flags, directMemoryStart, alignment);
if (len == 0 || !Common::Is16KBAligned(len)) {
LOG_ERROR(Kernel_Vmm, "Map size is either zero or not 16KB aligned!");
return SCE_KERNEL_ERROR_EINVAL;
@@ -168,6 +164,13 @@ int PS4_SYSV_ABI sceKernelMapNamedDirectMemory(void** addr, u64 len, int prot, i
const VAddr in_addr = reinterpret_cast<VAddr>(*addr);
const auto mem_prot = static_cast<Core::MemoryProt>(prot);
const auto map_flags = static_cast<Core::MemoryMapFlags>(flags);
SCOPE_EXIT {
LOG_INFO(Kernel_Vmm,
"in_addr = {:#x}, out_addr = {}, len = {:#x}, prot = {:#x}, flags = {:#x}, directMemoryStart = {:#x}, "
"alignment = {:#x}",
in_addr, fmt::ptr(*addr), len, prot, flags, directMemoryStart, alignment);
};
auto* memory = Core::Memory::Instance();
return memory->MapMemory(addr, in_addr, len, mem_prot, map_flags, Core::VMAType::Direct, "",
false, directMemoryStart, alignment);
@@ -201,13 +204,13 @@ s32 PS4_SYSV_ABI sceKernelMapNamedFlexibleMemory(void** addr_in_out, std::size_t
const VAddr in_addr = reinterpret_cast<VAddr>(*addr_in_out);
const auto mem_prot = static_cast<Core::MemoryProt>(prot);
const auto map_flags = static_cast<Core::MemoryMapFlags>(flags);
SCOPE_EXIT {
LOG_INFO(Kernel_Vmm, "in_addr = {:#x}, out_addr = {}, len = {:#x}, prot = {:#x}, flags = {:#x}",
in_addr, fmt::ptr(*addr_in_out), len, prot, flags);
};
auto* memory = Core::Memory::Instance();
const int ret = memory->MapMemory(addr_in_out, in_addr, len, mem_prot, map_flags,
Core::VMAType::Flexible, name);
LOG_INFO(Kernel_Vmm, "addr = {}, len = {:#x}, prot = {:#x}, flags = {:#x}",
fmt::ptr(*addr_in_out), len, prot, flags);
return ret;
return memory->MapMemory(addr_in_out, in_addr, len, mem_prot, map_flags,
Core::VMAType::Flexible, name);
}
s32 PS4_SYSV_ABI sceKernelMapFlexibleMemory(void** addr_in_out, std::size_t len, int prot,
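
Moving the LOG_INFO into SCOPE_EXIT defers it until after MapMemory has written the resolved address back through the out-pointer, so one line records both the requested and the actual mapping on every return path. A rough stand-in for the macro from common/scope_exit.h (illustrative only; the real macro differs):

#include <cstdio>

template <typename F>
struct ScopeExit {
    F fn;
    ~ScopeExit() { fn(); } // invoked on every exit path from the enclosing scope
};
template <typename F>
ScopeExit(F) -> ScopeExit<F>;

int MapSomething(void** addr_in_out) {
    void* const requested = *addr_in_out;
    ScopeExit log{[&] {
        // Runs after *addr_in_out holds the result, even on early returns.
        std::printf("in_addr = %p, out_addr = %p\n", requested, *addr_in_out);
    }};
    *addr_in_out = reinterpret_cast<void*>(0x20000000); // stand-in for the real mapping
    return 0;
}

int main() {
    void* addr = nullptr;
    return MapSomething(&addr);
}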

View File

@@ -169,8 +169,8 @@ struct PthreadAttr {
};
using PthreadAttrT = PthreadAttr*;
static constexpr u32 ThrStackDefault = 2_MB;
static constexpr u32 ThrStackInitial = 2 * ThrStackDefault;
static constexpr u32 ThrStackDefault = 1_MB;
static constexpr u32 ThrStackInitial = 2_MB;
static constexpr u32 ThrPageSize = 16_KB;
static constexpr u32 ThrGuardDefault = ThrPageSize;

View File

@@ -22,8 +22,6 @@ int ThreadState::CreateStack(PthreadAttr* attr) {
return 0;
}
VAddr stackaddr;
/*
* Round up stack size to nearest multiple of _thr_page_size so
* that mmap() will work. If the stack size is not an even
@@ -83,7 +81,7 @@ }
}
/* Allocate a new stack. */
stackaddr = last_stack - stacksize - guardsize;
VAddr stackaddr = last_stack - stacksize - guardsize;
/*
* Even if stack allocation fails, we don't want to try to

View File

@@ -133,6 +133,10 @@ public:
rasterizer = rasterizer_;
}
AddressSpace& GetAddressSpace() {
return impl;
}
u64 GetTotalDirectSize() const {
return total_direct_size;
}

View File

@@ -1,6 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma clang optimize off
#include <cryptopp/sha.h>
#include "common/alignment.h"

View File

@@ -1,6 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma clang optimize off
#include "common/assert.h"
#include "common/config.h"
#include "common/debug.h"
@@ -550,7 +550,7 @@ Liverpool::Task Liverpool::ProcessGraphics(std::span<const u32> dcb, std::span<c
sizeof(u32), false);
} else if (dma_data->src_sel == DmaDataSrc::Gds &&
dma_data->dst_sel == DmaDataDst::Memory) {
LOG_WARNING(Render_Vulkan, "GDS memory read");
//LOG_WARNING(Render_Vulkan, "GDS memory read");
} else if (dma_data->src_sel == DmaDataSrc::Memory &&
dma_data->dst_sel == DmaDataDst::Memory) {
rasterizer->InlineData(dma_data->DstAddress<VAddr>(),

View File

@@ -7,6 +7,7 @@
#include "common/assert.h"
#include "common/error.h"
#include "common/signal_context.h"
#include "core/memory.h"
#include "core/signals.h"
#include "video_core/page_manager.h"
#include "video_core/renderer_vulkan/vk_rasterizer.h"
@@ -145,15 +146,11 @@ struct PageManager::Impl {
ASSERT_MSG(owned_ranges.find(address) != owned_ranges.end(),
"Attempted to track non-GPU memory at address {:#x}, size {:#x}.", address,
size);
#ifdef _WIN32
DWORD prot = allow_write ? PAGE_READWRITE : PAGE_READONLY;
DWORD old_prot{};
BOOL result = VirtualProtect(std::bit_cast<LPVOID>(address), size, prot, &old_prot);
ASSERT_MSG(result != 0, "Region protection failed");
#else
mprotect(reinterpret_cast<void*>(address), size,
PROT_READ | (allow_write ? PROT_WRITE : 0));
#endif
auto* memory = Core::Memory::Instance();
auto& impl = memory->GetAddressSpace();
impl.Protect(address, size,
allow_write ? Core::MemoryPermission::ReadWrite
: Core::MemoryPermission::Read);
}
static bool GuestFaultSignalHandler(void* context, void* fault_address) {
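
The deleted #ifdef block is the pattern this hunk removes: each call site translating protection flags to VirtualProtect or mprotect by hand. Routing everything through AddressSpace::Protect keeps call sites platform-neutral. A hedged sketch of the translation the removed code performed (names here are illustrative, not shadPS4's):

#include <cstddef>
#include <cstdint>
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/mman.h>
#endif

enum class Perm { Read, ReadWrite };

// One cross-platform entry point, analogous in spirit to AddressSpace::Protect.
bool ProtectRange(std::uintptr_t address, std::size_t size, Perm perm) {
#ifdef _WIN32
    const DWORD prot = perm == Perm::ReadWrite ? PAGE_READWRITE : PAGE_READONLY;
    DWORD old_prot{};
    return VirtualProtect(reinterpret_cast<LPVOID>(address), size, prot,
                          &old_prot) != 0;
#else
    const int prot = PROT_READ | (perm == Perm::ReadWrite ? PROT_WRITE : 0);
    return mprotect(reinterpret_cast<void*>(address), size, prot) == 0;
#endif
}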

View File

@@ -1,6 +1,6 @@
// SPDX-FileCopyrightText: Copyright 2024 shadPS4 Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma clang optimize off
// Include the vulkan platform specific header
#if defined(ANDROID)
#define VK_USE_PLATFORM_ANDROID_KHR