FEX-Emu/FEX (mirror of https://github.com/FEX-Emu/FEX.git)

Mtrack: Remove race conditions around concurrent invalidation and compilation

commit 33845a3112
parent 096ed29b5e
@@ -966,6 +966,9 @@ namespace FEXCore::Context {
 uintptr_t Context::CompileBlock(FEXCore::Core::CpuStateFrame *Frame, uint64_t GuestRIP) {
   auto Thread = Frame->Thread;
 
+  // Needs to be held for SMC interactions around concurrent compile and invalidation hazards
+  auto InvalidationLk = Thread->CTX->SyscallHandler->CompileCodeLock(GuestRIP);
+
   // Is the code in the cache?
   // The backends only check L1 and L2, not L3
   if (auto HostCode = Thread->LookupCache->FindBlock(GuestRIP)) {
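For readers unfamiliar with the pattern: CompileCodeLock hands the compiler a shared (reader) lock, while the invalidation paths later in this commit take the same mutex uniquely. A rough standalone sketch of that scheme, with simplified names that are not FEX's actual classes:

#include <mutex>
#include <shared_mutex>

struct InvalidationScheme {
  std::shared_mutex InvalidationMutex;

  // Analogue of CompileCodeLock(): compilation threads are readers and may run concurrently.
  std::shared_lock<std::shared_mutex> CompileCodeLock() {
    return std::shared_lock(InvalidationMutex);
  }

  // Analogue of the SMC fault path: invalidation holds the mutex uniquely, so no block can
  // be compiled in between dropping stale code and changing page protection.
  void InvalidateRange() {
    std::unique_lock lk(InvalidationMutex);
    // ... mark the page read-write, then flush compiled code for the range ...
  }
};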
@@ -79,6 +79,7 @@ namespace FEXCore::HLE {
   virtual FEXCore::CodeLoader *GetCodeLoader() const { return nullptr; }
   virtual void MarkGuestExecutableRange(uint64_t Start, uint64_t Length) { }
   virtual AOTIRCacheEntryLookupResult LookupAOTIRCacheEntry(uint64_t GuestAddr) = 0;
+  virtual std::shared_lock<std::shared_mutex> CompileCodeLock(uint64_t Start) = 0;
 
 protected:
   SyscallOSABI OSABI;
@@ -137,6 +137,11 @@ class DummySyscallHandler: public FEXCore::HLE::SyscallHandler {
   FEXCore::HLE::AOTIRCacheEntryLookupResult LookupAOTIRCacheEntry(uint64_t GuestAddr) override {
     return {0, 0, FHU::ScopedSignalMaskWithSharedLock {Mutex}};
   }
+
+  std::shared_mutex Mutex2;
+  std::shared_lock<std::shared_mutex> CompileCodeLock(uint64_t Start) {
+    return std::shared_lock(Mutex2);
+  }
 };
 
 int main(int argc, char **argv, char **const envp)
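The dummy handler above works because std::shared_lock is movable: returning it by value transfers ownership of the held lock to the caller, which is also what lets CompileBlock keep the lock alive in a local variable. A minimal standalone sketch of that idiom (names are illustrative, not FEX's):

#include <mutex>
#include <shared_mutex>

static std::shared_mutex InvalidationMutex;

// Acquires the shared lock and moves it out; the mutex stays locked after return.
std::shared_lock<std::shared_mutex> AcquireCompileLock() {
  return std::shared_lock(InvalidationMutex);
}

void CompileSomething() {
  auto Lk = AcquireCompileLock();  // shared lock held for the rest of this scope
  // ... compile while invalidation is blocked ...
}                                  // lock released here when Lk is destroyed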
@@ -184,7 +184,10 @@ public:
 
   ///// VMA (Virtual Memory Area) tracking /////
   static bool HandleSegfault(FEXCore::Core::InternalThreadState *Thread, int Signal, void *info, void *ucontext);
-  virtual void MarkGuestExecutableRange(uint64_t Start, uint64_t Length) override;
+  void MarkGuestExecutableRange(uint64_t Start, uint64_t Length) override;
+  // AOTIRCacheEntryLookupResult also includes a shared lock guard, so the pointed AOTIRCacheEntry return can be safely used
+  FEXCore::HLE::AOTIRCacheEntryLookupResult LookupAOTIRCacheEntry(uint64_t GuestAddr) final override;
+  std::shared_lock<std::shared_mutex> CompileCodeLock(uint64_t Start) override;
 
   ///// FORK tracking /////
   void LockBeforeFork();
@@ -224,9 +227,6 @@ private:
 
   ///// VMA (Virtual Memory Area) tracking /////
 
-  // AOTIRCacheEntryLookupResult also includes a shared lock guard, so the pointed AOTIRCacheEntry return can be safely used
-  FEXCore::HLE::AOTIRCacheEntryLookupResult LookupAOTIRCacheEntry(uint64_t GuestAddr) final override;
-
   struct SpecialDev {
     static constexpr uint64_t Anon = 0x1'0000'0000; // Anonymous shared mapping, id is incrementing allocation number
@@ -293,7 +293,11 @@ private:
 
   struct VMATracking {
     using VMAEntry = SyscallHandler::VMAEntry;
+    // Held while reading/writing this struct
     std::shared_mutex Mutex;
 
+    // Held unique {invalidate, mprotect change} to guarantee mt invalidation correctness
+    std::shared_mutex InvalidationMutex;
+
     // Memory ranges indexed by page aligned starting address
     std::map<uint64_t, VMAEntry> VMAs;
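The VMAs map above keys each range by its page-aligned start address, so a "which VMA contains this address" query is an upper_bound followed by one step back. A standalone sketch of that lookup, using a simplified VMAEntry that is not FEX's actual type:

#include <cstdint>
#include <map>
#include <optional>

struct VMAEntry {
  uint64_t Base;
  uint64_t Length;
};

std::optional<VMAEntry> FindVMA(const std::map<uint64_t, VMAEntry> &VMAs, uint64_t Addr) {
  auto It = VMAs.upper_bound(Addr);  // first range starting strictly after Addr
  if (It == VMAs.begin()) {
    return std::nullopt;
  }
  --It;                              // candidate range starting at or before Addr
  if (Addr < It->second.Base + It->second.Length) {
    return It->second;
  }
  return std::nullopt;
}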
@@ -63,6 +63,10 @@ bool SyscallHandler::HandleSegfault(FEXCore::Core::InternalThreadState *Thread,
 
   auto FaultBase = FEXCore::AlignDown(FaultAddress, FHU::FEX_PAGE_SIZE);
 
+  // take lock, no code compilation happens between invalidation and prot change
+  // no need for signal mask, we have an outer one
+  std::unique_lock lk2(VMATracking->InvalidationMutex);
+
   if (Entry->second.Flags.Shared) {
     LOGMAN_THROW_A_FMT(Entry->second.Resource, "VMA tracking error");
@@ -75,25 +79,27 @@ bool SyscallHandler::HandleSegfault(FEXCore::Core::InternalThreadState *Thread,
     do {
       if (VMA->Offset <= Offset && (VMA->Offset + VMA->Length) > Offset) {
         auto FaultBaseMirrored = Offset - VMA->Offset + VMA->Base;
-        FEXCore::Context::InvalidateGuestCodeRange(CTX, FaultBaseMirrored, FHU::FEX_PAGE_SIZE);
         if (VMA->Prot.Writable) {
           auto rv = mprotect((void *)FaultBaseMirrored, FHU::FEX_PAGE_SIZE, PROT_READ | PROT_WRITE);
           LogMan::Throw::AFmt(rv == 0, "mprotect({}, {}) failed", FaultBaseMirrored, FHU::FEX_PAGE_SIZE);
         }
+        FEXCore::Context::InvalidateGuestCodeRange(CTX, FaultBaseMirrored, FHU::FEX_PAGE_SIZE);
       }
     } while ((VMA = VMA->ResourceNextVMA));
   } else {
     // Mark as read write before flush, so that if code is compiled after the Flush but before returning, the segfault will be
     // re-raised
-    FEXCore::Context::InvalidateGuestCodeRange(CTX, FaultBase, FHU::FEX_PAGE_SIZE);
     auto rv = mprotect((void *)FaultBase, FHU::FEX_PAGE_SIZE, PROT_READ | PROT_WRITE);
     LogMan::Throw::AFmt(rv == 0, "mprotect({}, {}) failed", FaultBase, FHU::FEX_PAGE_SIZE);
+    FEXCore::Context::InvalidateGuestCodeRange(CTX, FaultBase, FHU::FEX_PAGE_SIZE);
   }
 
   return true;
 }
 }
 
+std::shared_lock<std::shared_mutex> SyscallHandler::CompileCodeLock(uint64_t Start) {
+  return std::shared_lock(VMATracking.InvalidationMutex);
+}
+
 void SyscallHandler::MarkGuestExecutableRange(uint64_t Start, uint64_t Length) {
   const auto Base = Start & FHU::FEX_PAGE_MASK;
   const auto Top = FEXCore::AlignUp(Start + Length, FHU::FEX_PAGE_SIZE);
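The "mark as read write before flush ... the segfault will be re-raised" comment relies on the fault-and-retry behaviour of mprotect plus SIGSEGV: once the handler returns, the faulting store re-executes against the new protection. A minimal standalone Linux sketch of that mechanism (illustrative only, not FEX code; calling mprotect from a signal handler is not formally async-signal-safe, though SMC tracking schemes depend on it working in practice):

#include <cstdint>
#include <cstdio>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>

static uintptr_t PageSize;

static void Handler(int, siginfo_t *info, void *) {
  // Equivalent of the "mark as read write" step: make the faulting page writable,
  // return, and let the faulting store re-execute.
  const auto FaultBase = reinterpret_cast<uintptr_t>(info->si_addr) & ~(PageSize - 1);
  mprotect(reinterpret_cast<void *>(FaultBase), PageSize, PROT_READ | PROT_WRITE);
}

int main() {
  PageSize = sysconf(_SC_PAGESIZE);

  struct sigaction sa {};
  sa.sa_sigaction = Handler;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, nullptr);

  // A read-only page stands in for a page that has been write-protected for SMC tracking.
  void *Mapping = mmap(nullptr, PageSize, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (Mapping == MAP_FAILED) {
    return 1;
  }
  auto *Page = static_cast<char *>(Mapping);

  Page[0] = 42;  // Faults once; the handler flips protection and the store retries
  printf("%d\n", Page[0]);

  munmap(Mapping, PageSize);
  return 0;
}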