Moving xe::Memory and code cache to abstracted APIs.

Ben Vanik 2015-07-15 22:10:05 -07:00
parent fefaa31cd8
commit 8154d1dad6
7 changed files with 118 additions and 108 deletions

@@ -58,6 +58,15 @@ bool DeallocFixed(void* base_address, size_t length,
bool Protect(void* base_address, size_t length, PageAccess access,
PageAccess* out_old_access);
typedef void* FileMappingHandle;
FileMappingHandle CreateFileMappingHandle(std::wstring path, size_t length,
PageAccess access, bool commit);
void CloseFileMappingHandle(FileMappingHandle handle);
void* MapFileView(FileMappingHandle handle, void* base_address, size_t length,
PageAccess access, size_t file_offset);
bool UnmapFileView(FileMappingHandle handle, void* base_address, size_t length);
inline size_t hash_combine(size_t seed) { return seed; }
template <typename T, typename... Ts>

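The new declarations above (CreateFileMappingHandle, MapFileView, UnmapFileView, CloseFileMappingHandle) replace direct CreateFileMapping / MapViewOfFileEx usage at the call sites changed below. A minimal usage sketch, assuming only the signatures shown in this hunk; the region name and length are placeholders, not values from the commit:

#include "xenia/base/memory.h"

// Illustrative only: create a named, shareable region, map a view wherever
// the OS likes, then tear everything down again.
bool DemoSharedRegion() {
  const size_t kLength = 64 * 1024;
  xe::memory::FileMappingHandle handle = xe::memory::CreateFileMappingHandle(
      L"Local\\demo_region", kLength, xe::memory::PageAccess::kReadWrite,
      true /* commit pages up front */);
  if (!handle) {
    return false;
  }
  // A null base address lets the OS pick where the view lands.
  void* view = xe::memory::MapFileView(handle, nullptr, kLength,
                                       xe::memory::PageAccess::kReadWrite, 0);
  if (!view) {
    xe::memory::CloseFileMappingHandle(handle);
    return false;
  }
  // ... read/write through view ...
  xe::memory::UnmapFileView(handle, view, kLength);
  xe::memory::CloseFileMappingHandle(handle);
  return true;
}
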
@@ -89,29 +89,68 @@ bool Protect(void* base_address, size_t length, PageAccess access,
DWORD new_protect = ToWin32ProtectFlags(access);
DWORD old_protect = 0;
BOOL result = VirtualProtect(base_address, length, new_protect, &old_protect);
if (result) {
if (out_old_access) {
switch (old_protect) {
case PAGE_NOACCESS:
*out_old_access = PageAccess::kNoAccess;
break;
case PAGE_READONLY:
*out_old_access = PageAccess::kReadOnly;
break;
case PAGE_READWRITE:
*out_old_access = PageAccess::kReadWrite;
break;
case PAGE_EXECUTE_READWRITE:
*out_old_access = PageAccess::kExecuteReadWrite;
default:
assert_unhandled_case(access);
break;
}
}
return true;
} else {
if (!result) {
return false;
}
if (out_old_access) {
switch (old_protect) {
case PAGE_NOACCESS:
*out_old_access = PageAccess::kNoAccess;
break;
case PAGE_READONLY:
*out_old_access = PageAccess::kReadOnly;
break;
case PAGE_READWRITE:
*out_old_access = PageAccess::kReadWrite;
break;
case PAGE_EXECUTE_READWRITE:
*out_old_access = PageAccess::kExecuteReadWrite;
break;
default:
assert_unhandled_case(old_protect);
break;
}
}
return true;
}
FileMappingHandle CreateFileMappingHandle(std::wstring path, size_t length,
PageAccess access, bool commit) {
DWORD protect =
ToWin32ProtectFlags(access) | (commit ? SEC_COMMIT : SEC_RESERVE);
return CreateFileMappingW(INVALID_HANDLE_VALUE, NULL, protect,
static_cast<DWORD>(length >> 32),
static_cast<DWORD>(length), path.c_str());
}
void CloseFileMappingHandle(FileMappingHandle handle) { CloseHandle(handle); }
void* MapFileView(FileMappingHandle handle, void* base_address, size_t length,
PageAccess access, size_t file_offset) {
DWORD target_address_low = static_cast<DWORD>(file_offset);
DWORD target_address_high = static_cast<DWORD>(file_offset >> 32);
DWORD file_access = 0;
switch (access) {
case PageAccess::kReadOnly:
file_access = FILE_MAP_READ;
break;
case PageAccess::kReadWrite:
file_access = FILE_MAP_ALL_ACCESS;
break;
case PageAccess::kExecuteReadWrite:
file_access = FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE;
break;
case PageAccess::kNoAccess:
default:
assert_unhandled_case(access);
return nullptr;
}
return MapViewOfFileEx(handle, file_access, target_address_high,
target_address_low, length, base_address);
}
bool UnmapFileView(FileMappingHandle handle, void* base_address,
size_t length) {
return UnmapViewOfFile(base_address) ? true : false;
}
} // namespace memory

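The hunk above is the Win32 backing; after this commit the call sites no longer care which backing they get. For orientation, a POSIX-flavored backing of the same four functions could look roughly like the sketch below. It is assembled from the shm_open/mmap/munmap code this commit deletes from memory.cc further down and is not the project's actual non-Windows implementation; it assumes the void* FileMappingHandle is used to carry a shared-memory file descriptor.

// Hypothetical POSIX backing (sketch, not from this commit).
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <cstdint>
#include <string>
#include "xenia/base/memory.h"

namespace xe {
namespace memory {

FileMappingHandle CreateFileMappingHandle(std::wstring path, size_t length,
                                          PageAccess access, bool commit) {
  // Naive narrow conversion; shm names must begin with '/'.
  std::string name = "/" + std::string(path.begin(), path.end());
  int fd = shm_open(name.c_str(), O_CREAT | O_RDWR, 0600);
  if (fd < 0) {
    return nullptr;
  }
  ftruncate(fd, static_cast<off_t>(length));
  // Stash the descriptor in the opaque handle (assumes fd 0 is never returned).
  return reinterpret_cast<FileMappingHandle>(static_cast<intptr_t>(fd));
}

void CloseFileMappingHandle(FileMappingHandle handle) {
  close(static_cast<int>(reinterpret_cast<intptr_t>(handle)));
}

void* MapFileView(FileMappingHandle handle, void* base_address, size_t length,
                  PageAccess access, size_t file_offset) {
  int prot = PROT_NONE;
  switch (access) {
    case PageAccess::kReadOnly:
      prot = PROT_READ;
      break;
    case PageAccess::kReadWrite:
      prot = PROT_READ | PROT_WRITE;
      break;
    case PageAccess::kExecuteReadWrite:
      prot = PROT_READ | PROT_WRITE | PROT_EXEC;
      break;
    default:
      return nullptr;
  }
  // MAP_FIXED only when the caller demands a specific address, mirroring
  // MapViewOfFileEx semantics.
  int flags = MAP_SHARED | (base_address ? MAP_FIXED : 0);
  void* result = mmap(base_address, length, prot, flags,
                      static_cast<int>(reinterpret_cast<intptr_t>(handle)),
                      static_cast<off_t>(file_offset));
  return result == MAP_FAILED ? nullptr : result;
}

bool UnmapFileView(FileMappingHandle handle, void* base_address,
                   size_t length) {
  return munmap(base_address, length) == 0;
}

}  // namespace memory
}  // namespace xe
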
@@ -31,15 +31,7 @@ namespace x64 {
// TODO(benvanik): move this to emitter.
const static uint32_t kUnwindInfoSize = 4 + (2 * 1 + 2 + 2);
X64CodeCache::X64CodeCache()
: mapping_(nullptr),
indirection_default_value_(0xFEEDF00D),
indirection_table_base_(nullptr),
generated_code_base_(nullptr),
generated_code_offset_(0),
generated_code_commit_mark_(0),
unwind_table_handle_(nullptr),
unwind_table_count_(0) {}
X64CodeCache::X64CodeCache() = default;
X64CodeCache::~X64CodeCache() {
if (indirection_table_base_) {
@@ -60,9 +52,10 @@ X64CodeCache::~X64CodeCache() {
// Unmap all views and close mapping.
if (mapping_) {
UnmapViewOfFile(generated_code_base_);
CloseHandle(mapping_);
mapping_ = 0;
xe::memory::UnmapFileView(mapping_, generated_code_base_,
kGeneratedCodeSize);
xe::memory::CloseFileMappingHandle(mapping_);
mapping_ = nullptr;
}
}
@@ -81,22 +74,20 @@ bool X64CodeCache::Initialize() {
}
// Create mmap file. This allows us to share the code cache with the debugger.
wchar_t file_name[256];
wsprintf(file_name, L"Local\\xenia_code_cache_%p",
Clock::QueryHostTickCount());
file_name_ = file_name;
mapping_ = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
PAGE_EXECUTE_READWRITE | SEC_RESERVE, 0,
kGeneratedCodeSize, file_name_.c_str());
file_name_ = std::wstring(L"Local\\xenia_code_cache_") +
std::to_wstring(Clock::QueryHostTickCount());
mapping_ = xe::memory::CreateFileMappingHandle(
file_name_, kGeneratedCodeSize, xe::memory::PageAccess::kExecuteReadWrite,
false);
if (!mapping_) {
XELOGE("Unable to create code cache mmap");
return false;
}
// Map generated code region into the file. Pages are committed as required.
generated_code_base_ = reinterpret_cast<uint8_t*>(MapViewOfFileEx(
mapping_, FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE, 0, 0,
kGeneratedCodeSize, reinterpret_cast<void*>(kGeneratedCodeBase)));
generated_code_base_ = reinterpret_cast<uint8_t*>(xe::memory::MapFileView(
mapping_, reinterpret_cast<void*>(kGeneratedCodeBase), kGeneratedCodeSize,
xe::memory::PageAccess::kExecuteReadWrite, 0));
if (!generated_code_base_) {
XELOGE("Unable to allocate code cache generated code storage");
XELOGE(

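Because the mapping is created with commit set to false (SEC_RESERVE), pages of the reserved view still have to be committed before the emitter writes into them; that path is outside this hunk. A Windows-only illustration of the reserve-then-commit pattern, using a hypothetical helper rather than the code cache's actual commit routine:

#include <windows.h>

#include <atomic>
#include <cstddef>
#include <cstdint>

// Hypothetical helper (not from this commit): commit enough of a
// SEC_RESERVE-backed view that [offset, offset + length) is writable,
// tracking a high-water mark the way generated_code_commit_mark_ does.
bool EnsureCommitted(uint8_t* view_base, std::atomic<size_t>& commit_mark,
                     size_t offset, size_t length) {
  const size_t kCommitGranularity = 64 * 1024;
  size_t needed = offset + length;
  size_t current = commit_mark.load();
  while (needed > current) {
    size_t new_mark =
        (needed + kCommitGranularity - 1) & ~(kCommitGranularity - 1);
    // Re-committing already-committed pages is harmless, so a race here only
    // wastes a little work.
    if (!VirtualAlloc(view_base + current, new_mark - current, MEM_COMMIT,
                      PAGE_EXECUTE_READWRITE)) {
      return false;
    }
    if (commit_mark.compare_exchange_weak(current, new_mark)) {
      break;
    }
  }
  return true;
}

Committing lazily keeps large reservations cheap: only pages that are actually written ever consume page-file backing.
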
@@ -15,6 +15,7 @@
#include <string>
#include <vector>
#include "xenia/base/memory.h"
#include "xenia/base/mutex.h"
#include "xenia/base/platform_win.h"
#include "xenia/cpu/backend/code_cache.h"
@@ -61,33 +62,33 @@ class X64CodeCache : public CodeCache {
void* LookupUnwindEntry(uintptr_t host_address);
std::wstring file_name_;
HANDLE mapping_;
xe::memory::FileMappingHandle mapping_ = nullptr;
// Must be held when manipulating the offsets or counts of anything, to keep
// the tables consistent and ordered.
xe::mutex allocation_mutex_;
// Value that the indirection table will be initialized with upon commit.
uint32_t indirection_default_value_;
uint32_t indirection_default_value_ = 0xFEEDF00D;
// Fixed at kIndirectionTableBase in host space, holding 4 byte pointers into
// the generated code table that correspond to the PPC functions in guest
// space.
uint8_t* indirection_table_base_;
uint8_t* indirection_table_base_ = nullptr;
// Fixed at kGeneratedCodeBase and holding all generated code, growing as
// needed.
uint8_t* generated_code_base_;
uint8_t* generated_code_base_ = nullptr;
// Current offset to empty space in generated code.
size_t generated_code_offset_;
size_t generated_code_offset_ = 0;
// Current high water mark of COMMITTED code.
std::atomic<size_t> generated_code_commit_mark_;
std::atomic<size_t> generated_code_commit_mark_ = {0};
// Growable function table system handle.
void* unwind_table_handle_;
void* unwind_table_handle_ = nullptr;
// Actual unwind table entries.
std::vector<RUNTIME_FUNCTION> unwind_table_;
// Current number of entries in the table.
std::atomic<uint32_t> unwind_table_count_;
std::atomic<uint32_t> unwind_table_count_ = {0};
};
} // namespace x64

@@ -22,6 +22,9 @@
#include "xenia/kernel/util/shim_utils.h"
#include "xenia/kernel/util/xex2.h"
// For FileTimeToSystemTime and SystemTimeToFileTime:
#include "xenia/base/platform_win.h"
namespace xe {
namespace kernel {

@@ -24,10 +24,6 @@
// TODO(benvanik): move xbox.h out
#include "xenia/xbox.h"
#if !XE_PLATFORM_WIN32
#include <sys/mman.h>
#endif // WIN32
DEFINE_bool(protect_zero, false,
"Protect the zero page from reads and writes.");
@@ -76,12 +72,7 @@ static Memory* active_memory_ = nullptr;
void CrashDump() { active_memory_->DumpMap(); }
Memory::Memory()
: virtual_membase_(nullptr),
physical_membase_(nullptr),
reserve_address_(0),
mapping_(0),
mapping_base_(nullptr) {
Memory::Memory() {
system_page_size_ = uint32_t(xe::memory::page_size());
assert_zero(active_memory_);
active_memory_ = this;
@@ -107,9 +98,9 @@ Memory::~Memory() {
// Unmap all views and close mapping.
if (mapping_) {
UnmapViews();
CloseHandle(mapping_);
mapping_base_ = 0;
mapping_ = 0;
xe::memory::CloseFileMappingHandle(mapping_);
mapping_base_ = nullptr;
mapping_ = nullptr;
}
virtual_membase_ = nullptr;
@@ -117,23 +108,15 @@ Memory::~Memory() {
}
int Memory::Initialize() {
wchar_t file_name[256];
wsprintf(file_name, L"Local\\xenia_memory_%p", Clock::QueryHostTickCount());
file_name_ = file_name;
file_name_ = std::wstring(L"Local\\xenia_memory_") +
std::to_wstring(Clock::QueryHostTickCount());
// Create main page file-backed mapping. This is all reserved but
// uncommitted (so it shouldn't expand page file).
#if XE_PLATFORM_WIN32
mapping_ = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
PAGE_READWRITE | SEC_RESERVE,
// entire 4gb space + 512mb physical:
1, 0x1FFFFFFF, file_name_.c_str());
#else
char mapping_path[] = "/xenia/mapping/XXXXXX";
mktemp(mapping_path);
mapping_ = shm_open(mapping_path, O_CREAT, 0);
ftruncate(mapping_, 0x11FFFFFFF);
#endif // XE_PLATFORM_WIN32
// Create main page file-backed mapping. This is all reserved but
// uncommitted (so it shouldn't expand page file).
mapping_ = xe::memory::CreateFileMappingHandle(
file_name_,
// entire 4gb space + 512mb physical:
0x11FFFFFFF, xe::memory::PageAccess::kReadWrite, false);
if (!mapping_) {
XELOGE("Unable to reserve the 4gb guest address space.");
assert_not_null(mapping_);
@@ -236,21 +219,10 @@ const static struct {
int Memory::MapViews(uint8_t* mapping_base) {
assert_true(xe::countof(map_info) == xe::countof(views_.all_views));
for (size_t n = 0; n < xe::countof(map_info); n++) {
#if XE_PLATFORM_WIN32
DWORD target_address_low = static_cast<DWORD>(map_info[n].target_address);
DWORD target_address_high =
static_cast<DWORD>(map_info[n].target_address >> 32);
views_.all_views[n] = reinterpret_cast<uint8_t*>(MapViewOfFileEx(
mapping_, FILE_MAP_ALL_ACCESS, target_address_high, target_address_low,
views_.all_views[n] = reinterpret_cast<uint8_t*>(xe::memory::MapFileView(
mapping_, mapping_base + map_info[n].virtual_address_start,
map_info[n].virtual_address_end - map_info[n].virtual_address_start + 1,
mapping_base + map_info[n].virtual_address_start));
#else
views_.all_views[n] = reinterpret_cast<uint8_t*>(mmap(
map_info[n].virtual_address_start + mapping_base,
map_info[n].virtual_address_end - map_info[n].virtual_address_start + 1,
PROT_NONE, MAP_SHARED | MAP_FIXED, mapping_,
map_info[n].target_address));
#endif // XE_PLATFORM_WIN32
xe::memory::PageAccess::kReadWrite, map_info[n].target_address));
if (!views_.all_views[n]) {
// Failed, so bail and try again.
UnmapViews();
@@ -263,13 +235,9 @@ int Memory::MapViews(uint8_t* mapping_base) {
void Memory::UnmapViews() {
for (size_t n = 0; n < xe::countof(views_.all_views); n++) {
if (views_.all_views[n]) {
#if XE_PLATFORM_WIN32
UnmapViewOfFile(views_.all_views[n]);
#else
size_t length = map_info[n].virtual_address_end -
map_info[n].virtual_address_start + 1;
munmap(views_.all_views[n], length);
#endif // XE_PLATFORM_WIN32
xe::memory::UnmapFileView(mapping_, views_.all_views[n], length);
}
}
}
@@ -437,7 +405,6 @@ void Memory::DumpMap() {
}
xe::memory::PageAccess ToPageAccess(uint32_t protect) {
DWORD result = 0;
if ((protect & kMemoryProtectRead) && !(protect & kMemoryProtectWrite)) {
return xe::memory::PageAccess::kReadOnly;
} else if ((protect & kMemoryProtectRead) &&

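The hunk cuts off mid-expression; for context, a flag-to-PageAccess mapping of this shape typically finishes along the following lines (a hedged reconstruction, not the file's verbatim remainder):

xe::memory::PageAccess ToPageAccess(uint32_t protect) {
  if ((protect & kMemoryProtectRead) && !(protect & kMemoryProtectWrite)) {
    return xe::memory::PageAccess::kReadOnly;
  } else if ((protect & kMemoryProtectRead) &&
             (protect & kMemoryProtectWrite)) {
    return xe::memory::PageAccess::kReadWrite;
  } else {
    return xe::memory::PageAccess::kNoAccess;
  }
}
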
@@ -16,8 +16,8 @@
#include <string>
#include <vector>
#include "xenia/base/memory.h"
#include "xenia/base/mutex.h"
#include "xenia/base/platform_win.h"
#include "xenia/cpu/mmio_handler.h"
namespace xe {
@@ -216,13 +216,13 @@
private:
std::wstring file_name_;
uint32_t system_page_size_;
uint8_t* virtual_membase_;
uint8_t* physical_membase_;
uint64_t reserve_address_;
uint32_t system_page_size_ = 0;
uint8_t* virtual_membase_ = nullptr;
uint8_t* physical_membase_ = nullptr;
uint64_t reserve_address_ = 0;
HANDLE mapping_;
uint8_t* mapping_base_;
xe::memory::FileMappingHandle mapping_ = nullptr;
uint8_t* mapping_base_ = nullptr;
union {
struct {
uint8_t* v00000000;
@@ -236,7 +236,7 @@
uint8_t* physical;
};
uint8_t* all_views[9];
} views_;
} views_ = {0};
std::unique_ptr<cpu::MMIOHandler> mmio_handler_;
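The views_ union is what lets MapViews and UnmapViews iterate all nine views by index while the rest of Memory dereferences the named pointers; both spellings refer to the same storage, and the new {0} initializer nulls every view. A compact illustration of the overlay, with stand-in names rather than the header's exact members:

#include <cstdint>

// Shape of Memory::views_ (illustrative; the real struct names all nine views
// individually). Anonymous structs in unions are a compiler extension that
// MSVC, Clang, and GCC accept, and reading all_views[i] after writing a named
// member relies on the same union punning the real code uses.
union Views {
  struct {
    uint8_t* v00000000;  // first named view
    uint8_t* named[7];   // stand-in for the middle named views
    uint8_t* physical;   // last named view
  };
  uint8_t* all_views[9];  // the same nine pointers, iterable by index
};

static_assert(sizeof(Views) == sizeof(uint8_t*) * 9,
              "named members and all_views must overlay exactly");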