cpu: Implement exclusive write callback for dynarmic

Author: sunho, 2021-05-07 20:48:35 +09:00 (committed by Nicolas Jallamion)
Parent: f76f2f2c39
Commit: ada53448f3
4 changed files with 79 additions and 0 deletions


@@ -99,6 +99,21 @@ public:
        }
    }
    bool MemoryWriteExclusive8(Dynarmic::A32::VAddr addr, uint8_t value, uint8_t expected) override {
        return Ptr<uint8_t>(addr).atomic_compare_and_swap(*parent->mem, value, expected);
    }
    bool MemoryWriteExclusive16(Dynarmic::A32::VAddr addr, uint16_t value, uint16_t expected) override {
        return Ptr<uint16_t>(addr).atomic_compare_and_swap(*parent->mem, value, expected);
    }
    bool MemoryWriteExclusive32(Dynarmic::A32::VAddr addr, uint32_t value, uint32_t expected) override {
        return Ptr<uint32_t>(addr).atomic_compare_and_swap(*parent->mem, value, expected);
    }
    bool MemoryWriteExclusive64(Dynarmic::A32::VAddr addr, uint64_t value, uint64_t expected) override {
        return Ptr<uint64_t>(addr).atomic_compare_and_swap(*parent->mem, value, expected);
    }
    void InterpreterFallback(Dynarmic::A32::VAddr addr, size_t num_insts) {
        const bool thumb = cpu->is_thumb_mode();
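
These four callbacks are the store half of the guest's exclusive-access pairs (LDREX/STREX and friends): the write may only land if memory still holds the value the matching exclusive load observed, and the returned bool tells Dynarmic whether it did, which the guest sees as its STREX status. A minimal sketch of that contract, using plain std::atomic and hypothetical names rather than the emulator's Ptr/MemState plumbing:

#include <atomic>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a single guest word; in the emulator this lives in MemState.
static std::atomic<uint32_t> guest_word{41};

// Same contract as MemoryWriteExclusive32: store `value` only if the word still
// holds `expected`, and report whether the store took effect.
static bool write_exclusive32(uint32_t value, uint32_t expected) {
    return guest_word.compare_exchange_strong(expected, value);
}

int main() {
    const uint32_t seen = guest_word.load();             // what the exclusive load read
    const bool ok = write_exclusive32(seen + 1, seen);    // matching store succeeds
    const bool stale = write_exclusive32(0, seen);        // stale expectation fails
    std::printf("ok=%d stale=%d word=%u\n", ok, stale, static_cast<unsigned>(guest_word.load()));
}

On failure the guest's STREX reports a non-zero status and the usual load/modify/store loop retries, so the callback only needs to report success or failure, not loop itself.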


@@ -1,6 +1,7 @@
add_library(
    mem
    STATIC
    include/mem/atomic.h
    include/mem/functions.h
    include/mem/mempool.h
    include/mem/block.h


@@ -0,0 +1,50 @@
#pragma once

#include <cstdint>
#include <cstring>
#include <memory>

#if WIN32
#include <intrin.h>
#endif

#if WIN32
inline bool atomic_compare_and_swap(volatile uint8_t *pointer, uint8_t value, uint8_t expected) {
    const uint8_t result = _InterlockedCompareExchange8(reinterpret_cast<volatile char *>(pointer), value, expected);
    return result == expected;
}

inline bool atomic_compare_and_swap(volatile uint16_t *pointer, uint16_t value, uint16_t expected) {
    const uint16_t result = _InterlockedCompareExchange16(reinterpret_cast<volatile short *>(pointer), value, expected);
    return result == expected;
}

inline bool atomic_compare_and_swap(volatile uint32_t *pointer, uint32_t value, uint32_t expected) {
    const uint32_t result = _InterlockedCompareExchange(reinterpret_cast<volatile long *>(pointer), value, expected);
    return result == expected;
}

inline bool atomic_compare_and_swap(volatile uint64_t *pointer, uint64_t value, uint64_t expected) {
    const uint64_t result = _InterlockedCompareExchange64(reinterpret_cast<volatile __int64 *>(pointer), value, expected);
    return result == expected;
}
#else
inline bool atomic_compare_and_swap(volatile uint8_t *pointer, uint8_t value, uint8_t expected) {
    return __sync_bool_compare_and_swap(pointer, expected, value);
}

inline bool atomic_compare_and_swap(volatile uint16_t *pointer, uint16_t value, uint16_t expected) {
    return __sync_bool_compare_and_swap(pointer, expected, value);
}

inline bool atomic_compare_and_swap(volatile uint32_t *pointer, uint32_t value, uint32_t expected) {
    return __sync_bool_compare_and_swap(pointer, expected, value);
}

inline bool atomic_compare_and_swap(volatile uint64_t *pointer, uint64_t value, uint64_t expected) {
    return __sync_bool_compare_and_swap(pointer, expected, value);
}
#endif
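
The two branches pick the platform CAS primitive: the MSVC _InterlockedCompareExchange* intrinsics return the previous value, so success is "previous == expected", while GCC/Clang's __sync_bool_compare_and_swap reports success directly but takes its arguments in the opposite order (expected before the new value). A quick sanity check of the overloads, assuming the header is reachable as <mem/atomic.h>:

#include <cstdint>

#include <mem/atomic.h>

int main() {
    uint32_t word = 5;
    const uint32_t expected = 5;
    // Succeeds: the word still holds the expected value, so 7 is stored.
    const bool ok = atomic_compare_and_swap(&word, uint32_t{7}, expected);
    // Fails: the word is now 7, so nothing is written.
    const bool stale = atomic_compare_and_swap(&word, uint32_t{9}, expected);
    return (ok && !stale && word == 7) ? 0 : 1;
}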


@@ -17,6 +17,8 @@
#pragma once
#include <cassert>
#include <mem/atomic.h>
#include <mem/functions.h>
#include <mem/state.h>
@@ -64,6 +66,17 @@ public:
        }
    }
    template <class U>
    bool atomic_compare_and_swap(MemState &mem, U value, U expected) {
        if constexpr (std::is_arithmetic_v<U>) {
            static_assert(std::is_same<U, T>::value);
            const auto ptr = reinterpret_cast<volatile U *>(&mem.memory[addr]);
            return ::atomic_compare_and_swap(ptr, value, expected);
        } else {
            assert(false);
            return false;
        }
    }
    bool valid(const MemState &mem) const {
        return (mem.allocated_pages[addr / mem.page_size] != 0);
    }
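
Ptr<T>::atomic_compare_and_swap is the glue between the two layers: it resolves the guest address to its backing host storage in MemState and hands that pointer to the platform helper, with the if constexpr/static_assert guard limiting it to scalar types that match T. A reduced model of the same path, with a hypothetical FakeMem standing in for MemState and the guest address treated as a plain offset:

#include <cstdint>
#include <type_traits>
#include <vector>

#include <mem/atomic.h>

// Reduced model: guest memory is a flat byte buffer and an "address" is an offset into it.
struct FakeMem {
    std::vector<uint8_t> memory = std::vector<uint8_t>(64);
};

template <class T>
bool cas_at(FakeMem &mem, uint32_t addr, T value, T expected) {
    static_assert(std::is_arithmetic_v<T>, "only scalar guest values are supported");
    const auto ptr = reinterpret_cast<volatile T *>(&mem.memory[addr]);
    return atomic_compare_and_swap(ptr, value, expected);
}

int main() {
    FakeMem mem;
    mem.memory[8] = 3;                                                   // guest byte at offset 8
    const bool ok = cas_at<uint8_t>(mem, 8, uint8_t{4}, uint8_t{3});     // succeeds
    const bool stale = cas_at<uint8_t>(mem, 8, uint8_t{5}, uint8_t{3});  // fails: value is now 4
    return (ok && !stale && mem.memory[8] == 4) ? 0 : 1;
}

Working on raw (volatile) pointers rather than std::atomic objects fits here because the guest address space is plain byte storage that already exists; C++20's std::atomic_ref would be the standard-library way to express the same in-place CAS.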