MemMap: Support dynamic page size selection

i.e. 4K to 16K on ARM64.
Stenzek 2024-10-05 19:25:23 +10:00
parent 0c2ab82252
commit 26b6c704f0
22 changed files with 148 additions and 63 deletions

View File

@@ -37,6 +37,15 @@ It does **not** use the LSB subdirectories of bin, share, etc, so you should dis
endif()
endif()
if(DEFINED HOST_MIN_PAGE_SIZE AND DEFINED HOST_MAX_PAGE_SIZE)
message(STATUS "Building with a dynamic page size of ${HOST_MIN_PAGE_SIZE} - ${HOST_MAX_PAGE_SIZE} bytes.")
elseif(DEFINED HOST_PAGE_SIZE)
message(STATUS "Building with detected page size of ${HOST_PAGE_SIZE}")
endif()
if(DEFINED HOST_CACHE_LINE_SIZE)
message(STATUS "Building with detected cache line size of ${HOST_CACHE_LINE_SIZE}")
endif()
if(NOT IS_SUPPORTED_COMPILER)
message(WARNING "*************** UNSUPPORTED CONFIGURATION ***************
You are not compiling DuckStation with a supported compiler.

View File

@@ -110,13 +110,21 @@ endfunction()
function(detect_page_size)
# This is only needed for ARM64, or if the user hasn't overridden it explicitly.
if(NOT CPU_ARCH_ARM64 OR HOST_PAGE_SIZE)
# For universal Apple builds, we use preprocessor macros to determine page size.
# Similar for Windows, except it's always 4KB.
if(NOT CPU_ARCH_ARM64 OR NOT LINUX)
unset(HOST_PAGE_SIZE PARENT_SCOPE)
unset(HOST_MIN_PAGE_SIZE PARENT_SCOPE)
unset(HOST_MAX_PAGE_SIZE PARENT_SCOPE)
return()
elseif(DEFINED HOST_PAGE_SIZE)
return()
endif()
if(NOT LINUX)
# For universal Apple builds, we use preprocessor macros to determine page size.
# Similar for Windows, except it's always 4KB.
if(HOST_MIN_PAGE_SIZE OR HOST_MAX_PAGE_SIZE)
if(NOT HOST_MIN_PAGE_SIZE OR NOT HOST_MAX_PAGE_SIZE)
message(FATAL_ERROR "Both HOST_MIN_PAGE_SIZE and HOST_MAX_PAGE_SIZE must be defined.")
endif()
return()
endif()
@@ -153,6 +161,7 @@ endfunction()
function(detect_cache_line_size)
# This is only needed for ARM64, or if the user hasn't overridden it explicitly.
if(NOT CPU_ARCH_ARM64 OR HOST_CACHE_LINE_SIZE)
unset(HOST_CACHE_LINE_SIZE PARENT_SCOPE)
return()
endif()
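
Note: the comment in detect_page_size() above says that non-Linux targets determine the page size with preprocessor macros rather than at configure time. For orientation, that compile-time path presumably reduces to a cascade along these lines; this is a sketch only, the Apple value is an assumption based on Apple Silicon's 16 KiB kernel pages, and the real cascade is partially visible in the common header hunk later in this commit.

#include <cstdint>
using u32 = std::uint32_t; // project-wide alias, repeated here so the sketch stands alone

#if defined(OVERRIDE_HOST_PAGE_SIZE)
static constexpr u32 HOST_PAGE_SIZE = OVERRIDE_HOST_PAGE_SIZE; // explicit override passed in by CMake
#elif defined(__APPLE__) && defined(__aarch64__)
static constexpr u32 HOST_PAGE_SIZE = 0x4000; // assumed: Apple Silicon kernels use 16 KiB pages
#elif defined(_WIN32)
static constexpr u32 HOST_PAGE_SIZE = 0x1000; // Windows is always 4 KiB
#else
static constexpr u32 HOST_PAGE_SIZE = 0x1000; // 4 KiB default
#endif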

View File

@@ -65,6 +65,11 @@ modules:
- "-DALLOW_INSTALL=ON"
- "-DINSTALL_SELF_CONTAINED=OFF"
# Set the page range to 4K-16K. This has no effect on X86, but is required for
# ARM builds, as some devices are now shipping with 16K kernels.
- "-DHOST_MIN_PAGE_SIZE=4096"
- "-DHOST_MAX_PAGE_SIZE=16384"
# Make sure we're using ThinLTO.
- "-DCMAKE_INTERPROCEDURAL_OPTIMIZATION=ON"
- "-DCMAKE_C_COMPILER=/usr/lib/sdk/llvm17/bin/clang"

View File

@@ -112,9 +112,18 @@ if(LINUX)
endif()
# If the host size was detected, we need to set it as a macro.
if(HOST_PAGE_SIZE)
target_compile_definitions(common PUBLIC "-DOVERRIDE_HOST_PAGE_SIZE=${HOST_PAGE_SIZE}")
if(DEFINED HOST_MIN_PAGE_SIZE AND DEFINED HOST_MAX_PAGE_SIZE)
target_compile_definitions(common PUBLIC
"-DMIN_HOST_PAGE_SIZE=${HOST_MIN_PAGE_SIZE}"
"-DMAX_HOST_PAGE_SIZE=${HOST_MAX_PAGE_SIZE}"
)
elseif(DEFINED HOST_PAGE_SIZE)
target_compile_definitions(common PUBLIC
"-DOVERRIDE_HOST_PAGE_SIZE=${HOST_PAGE_SIZE}"
)
endif()
if(HOST_CACHE_LINE_SIZE)
target_compile_definitions(common PUBLIC "-DOVERRIDE_HOST_CACHE_LINE_SIZE=${HOST_CACHE_LINE_SIZE}")
if(DEFINED HOST_CACHE_LINE_SIZE)
target_compile_definitions(common PUBLIC
"-DOVERRIDE_HOST_CACHE_LINE_SIZE=${HOST_CACHE_LINE_SIZE}"
)
endif()

View File

@@ -27,6 +27,7 @@
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#else
#include <cerrno>
#include <dlfcn.h>
@@ -42,8 +43,26 @@ namespace MemMap {
static void* AllocateJITMemoryAt(const void* addr, size_t size);
} // namespace MemMap
#ifdef DYNAMIC_HOST_PAGE_SIZE
const u32 HOST_PAGE_SIZE = MemMap::GetRuntimePageSize();
const u32 HOST_PAGE_MASK = MemMap::GetRuntimePageSize() - 1;
const u32 HOST_PAGE_SHIFT = std::bit_width(MemMap::GetRuntimePageSize() - 1);
#endif
#ifdef _WIN32
u32 MemMap::GetRuntimePageSize()
{
static u32 cached_page_size = 0;
if (cached_page_size != 0) [[likely]]
return cached_page_size;
SYSTEM_INFO si = {};
GetSystemInfo(&si);
cached_page_size = si.dwPageSize;
return cached_page_size;
}
bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
DebugAssert((size & (HOST_PAGE_SIZE - 1)) == 0);
@@ -193,7 +212,7 @@ bool SharedMemoryMappingArea::Create(size_t size)
return false;
m_size = size;
m_num_pages = size / HOST_PAGE_SIZE;
m_num_pages = size >> HOST_PAGE_SHIFT;
m_placeholder_ranges.emplace(0, size);
return true;
}
@@ -339,6 +358,18 @@ bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
#elif defined(__APPLE__)
u32 MemMap::GetRuntimePageSize()
{
static u32 cached_page_size = 0;
if (cached_page_size != 0) [[likely]]
return cached_page_size;
size_t page_size_size = sizeof(cached_page_size);
if (sysctlbyname("hw.pagesize", &cached_page_size, &page_size_size, nullptr, 0) != 0) [[unlikely]]
cached_page_size = 0;
return cached_page_size;
}
bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
DebugAssertMsg((size & (HOST_PAGE_SIZE - 1)) == 0, "Size is page aligned");
@@ -515,7 +546,7 @@ bool SharedMemoryMappingArea::Create(size_t size)
}
m_size = size;
m_num_pages = size / HOST_PAGE_SIZE;
m_num_pages = size >> HOST_PAGE_SHIFT;
return true;
}
@@ -600,6 +631,17 @@ void MemMap::EndCodeWrite()
#else
u32 MemMap::GetRuntimePageSize()
{
static u32 cached_page_size = 0;
if (cached_page_size != 0) [[likely]]
return cached_page_size;
const int res = sysconf(_SC_PAGESIZE);
cached_page_size = (res > 0) ? static_cast<u32>(res) : 0;
return cached_page_size;
}
bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
DebugAssertMsg((size & (HOST_PAGE_SIZE - 1)) == 0, "Size is page aligned");
@@ -788,7 +830,7 @@ bool SharedMemoryMappingArea::Create(size_t size)
m_base_ptr = static_cast<u8*>(alloc);
m_size = size;
m_num_pages = size / HOST_PAGE_SIZE;
m_num_pages = size >> HOST_PAGE_SHIFT;
return true;
}
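
A note on the derived constants introduced above: under DYNAMIC_HOST_PAGE_SIZE, HOST_PAGE_SIZE, HOST_PAGE_MASK and HOST_PAGE_SHIFT become runtime constants computed once from GetRuntimePageSize(), and the rest of this commit replaces divisions and multiplications by the page size with shifts through HOST_PAGE_SHIFT. A minimal standalone check of how the three values relate, assuming a 16 KiB runtime page size (the value is illustrative only):

#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
  const std::uint32_t page_size  = 16384;         // pretend GetRuntimePageSize() returned 16 KiB
  const std::uint32_t page_mask  = page_size - 1; // 0x3FFF
  const std::uint32_t page_shift = static_cast<std::uint32_t>(std::bit_width(page_mask)); // 14

  assert((1u << page_shift) == page_size);                  // shift and size agree
  assert((123456u >> page_shift) == (123456u / page_size)); // '>> SHIFT' replaces '/ SIZE'
  assert((123456u & page_mask) == (123456u % page_size));   // '& MASK' replaces '% SIZE'
  return 0;
}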

View File

@@ -51,6 +51,9 @@ enum class PageProtect : u32
class Error;
namespace MemMap {
/// Returns the size of pages for the current host.
u32 GetRuntimePageSize();
std::string GetFileMappingName(const char* prefix);
void* CreateSharedMemory(const char* name, size_t size, Error* error);
void DeleteSharedMemory(const char* name);
@@ -101,7 +104,7 @@ public:
ALWAYS_INLINE u8* BasePointer() const { return m_base_ptr; }
ALWAYS_INLINE u8* OffsetPointer(size_t offset) const { return m_base_ptr + offset; }
ALWAYS_INLINE u8* PagePointer(size_t page) const { return m_base_ptr + HOST_PAGE_SIZE * page; }
ALWAYS_INLINE u8* PagePointer(size_t page) const { return m_base_ptr + (page << HOST_PAGE_SHIFT); }
bool Create(size_t size);
void Destroy();

View File

@@ -187,6 +187,15 @@ struct dependent_int_false : std::false_type
#endif
// Host page sizes.
#if defined(MIN_HOST_PAGE_SIZE) || defined(MAX_HOST_PAGE_SIZE)
#if !defined(MIN_HOST_PAGE_SIZE) || !defined(MAX_HOST_PAGE_SIZE)
#error Both MIN_HOST_PAGE_SIZE and MAX_HOST_PAGE_SIZE need to be defined.
#endif
#define DYNAMIC_HOST_PAGE_SIZE 1
extern const u32 HOST_PAGE_SIZE;
extern const u32 HOST_PAGE_MASK;
extern const u32 HOST_PAGE_SHIFT;
#else
#if defined(OVERRIDE_HOST_PAGE_SIZE)
static constexpr u32 HOST_PAGE_SIZE = OVERRIDE_HOST_PAGE_SIZE;
static constexpr u32 HOST_PAGE_MASK = HOST_PAGE_SIZE - 1;
@@ -200,6 +209,9 @@ static constexpr u32 HOST_PAGE_SIZE = 0x1000;
static constexpr u32 HOST_PAGE_MASK = HOST_PAGE_SIZE - 1;
static constexpr u32 HOST_PAGE_SHIFT = 12;
#endif
static constexpr u32 MIN_HOST_PAGE_SIZE = HOST_PAGE_SIZE;
static constexpr u32 MAX_HOST_PAGE_SIZE = HOST_PAGE_SIZE;
#endif
// Host cache line sizes.
#if defined(OVERRIDE_HOST_CACHE_LINE_SIZE)
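
A note on the MIN/MAX pair defined above: in the non-dynamic configurations both bounds collapse to HOST_PAGE_SIZE, but in a dynamic build compile-time decisions have to stay valid for every page size in the range, and the safe bound differs by use: per-page tables must assume the smallest page, while page-boundary alignment must assume the largest. A small sketch of the two patterns this commit uses (the constants are stand-ins, not the real project values):

#include <cstdint>

constexpr std::uint32_t kRegionSize  = 2 * 1024 * 1024; // stand-in for a RAM region size
constexpr std::uint32_t kMinPageSize = 4096;            // stand-in for MIN_HOST_PAGE_SIZE
constexpr std::uint32_t kMaxPageSize = 16384;           // stand-in for MAX_HOST_PAGE_SIZE

// The worst case for "how many pages cover this region" is the smallest page
// size, so statically sized per-page tables use the MIN bound (compare the
// RAM_*_CODE_PAGE_COUNT change in the bus header below).
constexpr std::uint32_t kMaxPageCount = (kRegionSize + kMinPageSize - 1) / kMinPageSize;
static_assert(kMaxPageCount == 512, "2 MiB covered by 4 KiB pages");

// A static buffer that must start on a page boundary for any supported page
// size is aligned to the MAX bound (compare the recompiler code buffer below).
alignas(kMaxPageSize) static std::uint8_t s_page_aligned_buffer[kRegionSize];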

View File

@@ -565,7 +565,7 @@ void Bus::MapFastmemViews()
{
if (g_ram_code_bits[i])
{
u8* page_address = map_address + (i * HOST_PAGE_SIZE);
u8* page_address = map_address + (i << HOST_PAGE_SHIFT);
if (!MemMap::MemProtect(page_address, HOST_PAGE_SIZE, PageProtect::ReadOnly)) [[unlikely]]
{
ERROR_LOG("Failed to write-protect code page at {}", static_cast<void*>(page_address));
@@ -706,7 +706,7 @@ void Bus::ClearRAMCodePage(u32 index)
void Bus::SetRAMPageWritable(u32 page_index, bool writable)
{
if (!MemMap::MemProtect(&g_ram[page_index * HOST_PAGE_SIZE], HOST_PAGE_SIZE,
if (!MemMap::MemProtect(&g_ram[page_index << HOST_PAGE_SHIFT], HOST_PAGE_SIZE,
writable ? PageProtect::ReadWrite : PageProtect::ReadOnly)) [[unlikely]]
{
ERROR_LOG("Failed to set RAM host page {} ({}) to {}", page_index,
@@ -722,11 +722,11 @@ void Bus::SetRAMPageWritable(u32 page_index, bool writable)
// unprotect fastmem pages
for (const auto& it : s_fastmem_ram_views)
{
u8* page_address = it.first + (page_index * HOST_PAGE_SIZE);
u8* page_address = it.first + (page_index << HOST_PAGE_SHIFT);
if (!MemMap::MemProtect(page_address, HOST_PAGE_SIZE, protect)) [[unlikely]]
{
ERROR_LOG("Failed to {} code page {} (0x{:08X}) @ {}", writable ? "unprotect" : "protect", page_index,
page_index * static_cast<u32>(HOST_PAGE_SIZE), static_cast<void*>(page_address));
page_index << HOST_PAGE_SHIFT, static_cast<void*>(page_address));
}
}
@@ -757,7 +757,7 @@ void Bus::ClearRAMCodePageFlags()
bool Bus::IsCodePageAddress(PhysicalMemoryAddress address)
{
return IsRAMAddress(address) ? g_ram_code_bits[(address & g_ram_mask) / HOST_PAGE_SIZE] : false;
return IsRAMAddress(address) ? g_ram_code_bits[(address & g_ram_mask) >> HOST_PAGE_SHIFT] : false;
}
bool Bus::HasCodePagesInRange(PhysicalMemoryAddress start_address, u32 size)
@@ -770,7 +770,7 @@ bool Bus::HasCodePagesInRange(PhysicalMemoryAddress start_address, u32 size)
const u32 end_address = start_address + size;
while (start_address < end_address)
{
const u32 code_page_index = start_address / HOST_PAGE_SIZE;
const u32 code_page_index = start_address >> HOST_PAGE_SHIFT;
if (g_ram_code_bits[code_page_index])
return true;

View File

@@ -95,8 +95,8 @@ enum : TickCount
enum : u32
{
RAM_2MB_CODE_PAGE_COUNT = (RAM_2MB_SIZE + (HOST_PAGE_SIZE - 1)) / HOST_PAGE_SIZE,
RAM_8MB_CODE_PAGE_COUNT = (RAM_8MB_SIZE + (HOST_PAGE_SIZE - 1)) / HOST_PAGE_SIZE,
RAM_2MB_CODE_PAGE_COUNT = (RAM_2MB_SIZE + (MIN_HOST_PAGE_SIZE - 1)) / MIN_HOST_PAGE_SIZE,
RAM_8MB_CODE_PAGE_COUNT = (RAM_8MB_SIZE + (MIN_HOST_PAGE_SIZE - 1)) / MIN_HOST_PAGE_SIZE,
MEMORY_LUT_PAGE_SIZE = 4096,
MEMORY_LUT_PAGE_SHIFT = 12,
@@ -172,7 +172,7 @@ ALWAYS_INLINE static bool IsRAMAddress(PhysicalMemoryAddress address)
/// Returns the code page index for a RAM address.
ALWAYS_INLINE static u32 GetRAMCodePageIndex(PhysicalMemoryAddress address)
{
return (address & g_ram_mask) / HOST_PAGE_SIZE;
return (address & g_ram_mask) >> HOST_PAGE_SHIFT;
}
/// Returns true if the specified page contains code.
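
To put numbers on the MIN_HOST_PAGE_SIZE change above, assuming RAM_8MB_SIZE is 8 MiB: the compile-time RAM_8MB_CODE_PAGE_COUNT becomes 8 MiB / 4 KiB = 2048, while on a 16 KiB kernel GetRAMCodePageIndex() shifts by 14 and can only produce indices 0 through 511. The per-page arrays are therefore oversized on 16 KiB hosts but can never be indexed out of bounds. A standalone check of that arithmetic (not project code):

#include <cstdint>

int main()
{
  constexpr std::uint32_t ram_size      = 8 * 1024 * 1024; // assumed value of RAM_8MB_SIZE
  constexpr std::uint32_t min_page_size = 4096;            // MIN_HOST_PAGE_SIZE in a 4K-16K build
  constexpr std::uint32_t page_shift    = 14;              // HOST_PAGE_SHIFT on a 16 KiB kernel

  constexpr std::uint32_t code_page_count       = (ram_size + min_page_size - 1) / min_page_size; // 2048
  constexpr std::uint32_t highest_runtime_index = (ram_size - 1) >> page_shift;                   // 511

  static_assert(highest_runtime_index < code_page_count, "runtime page indices stay in bounds");
  return 0;
}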

View File

@@ -142,7 +142,7 @@ static constexpr u32 RECOMPILER_FAR_CODE_CACHE_SIZE = 16 * 1024 * 1024;
#define USE_CODE_BUFFER_SECTION 1
#ifdef __clang__
#pragma clang section bss = ".jitstorage"
__attribute__((aligned(HOST_PAGE_SIZE))) static u8 s_code_buffer_ptr[RECOMPILER_CODE_CACHE_SIZE];
__attribute__((aligned(MAX_HOST_PAGE_SIZE))) static u8 s_code_buffer_ptr[RECOMPILER_CODE_CACHE_SIZE];
#pragma clang section bss = ""
#endif
#else
@@ -614,7 +614,7 @@ void CPU::CodeCache::InvalidateBlocksWithPageIndex(u32 index)
else if (ppi.invalidate_count > INVALIDATE_COUNT_FOR_MANUAL_PROTECTION)
{
DEV_LOG("{} invalidations in {} frames to page {} [0x{:08X} -> 0x{:08X}], switching to manual protection",
ppi.invalidate_count, frame_delta, index, (index * HOST_PAGE_SIZE), ((index + 1) * HOST_PAGE_SIZE));
ppi.invalidate_count, frame_delta, index, (index << HOST_PAGE_SHIFT), ((index + 1) << HOST_PAGE_SHIFT));
ppi.mode = PageProtectionMode::ManualCheck;
new_block_state = BlockState::NeedsRecompile;
}
@@ -693,6 +693,7 @@ void CPU::CodeCache::InvalidateAllRAMBlocks()
void CPU::CodeCache::ClearBlocks()
{
for (u32 i = 0; i < Bus::RAM_8MB_CODE_PAGE_COUNT; i++)
{
PageProtectionInfo& ppi = s_page_protection[i];

View File

@@ -3001,7 +3001,7 @@ ALWAYS_INLINE bool CPU::DoSafeMemoryAccess(VirtualMemoryAddress address, u32& va
}
else
{
const u32 page_index = offset / HOST_PAGE_SIZE;
const u32 page_index = offset >> HOST_PAGE_SHIFT;
if constexpr (size == MemoryAccessSize::Byte)
{

View File

@@ -39,7 +39,10 @@
LOG_CHANNEL(GPU);
std::unique_ptr<GPU> g_gpu;
alignas(HOST_PAGE_SIZE) u16 g_vram[VRAM_SIZE / sizeof(u16)];
// aligning VRAM to 4K is fine, since the ARM64 instructions compute 4K page aligned addresses
// TODO: REMOVE ME
alignas(4096) u16 g_vram[VRAM_SIZE / sizeof(u16)];
u16 g_gpu_clut[GPU_CLUT_SIZE];
const GPU::GP0CommandHandlerTable GPU::s_GP0_command_handler_table = GPU::GenerateGP0CommandHandlerTable();
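
For context on the alignment comment above: the AArch64 ADRP instruction forms addresses in units of 4 KiB (the result is always the PC rounded down to a 4 KiB boundary plus a page-scaled immediate), regardless of the kernel's page size, which is presumably the 4K-aligned address computation the comment refers to. A plain C++ illustration of that rounding (not the emitted code):

#include <cstdint>

// ADRP computes (PC & ~0xFFF) + (imm << 12), i.e. a 4 KiB-aligned base address.
static std::uint64_t AdrpResult(std::uint64_t pc, std::int64_t imm_pages)
{
  return (pc & ~std::uint64_t{0xFFF}) + (static_cast<std::uint64_t>(imm_pages) << 12);
}

int main()
{
  const std::uint64_t pc = 0x123456789ull;
  return (AdrpResult(pc, 3) % 4096 == 0) ? 0 : 1; // the result is always 4 KiB aligned
}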

View File

@@ -15,6 +15,7 @@
#include "common/assert.h"
#include "common/file_system.h"
#include "common/log.h"
#include "common/memmap.h"
#include "common/path.h"
#include "common/string_util.h"
@@ -85,6 +86,12 @@ float SettingInfo::FloatStepValue() const
return step_value ? StringUtil::FromChars<float>(step_value).value_or(fallback_value) : fallback_value;
}
#ifdef DYNAMIC_HOST_PAGE_SIZE
// See note in settings.h - 16K ends up faster with LUT because of nearby code/data.
const CPUFastmemMode Settings::DEFAULT_CPU_FASTMEM_MODE =
(MemMap::GetRuntimePageSize() > 4096) ? CPUFastmemMode::LUT : CPUFastmemMode::MMap;
#endif
#if defined(_WIN32)
const MediaCaptureBackend Settings::DEFAULT_MEDIA_CAPTURE_BACKEND = MediaCaptureBackend::MediaFoundation;
#elif !defined(__ANDROID__)
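
One detail of the runtime default above: DEFAULT_CPU_FASTMEM_MODE is itself a dynamically initialized constant, and it calls MemMap::GetRuntimePageSize() directly; because that function computes and caches its result on first use, the call is safe no matter when static initialization reaches this object, whereas reading the dynamically initialized HOST_PAGE_SIZE global from another translation unit would not be guaranteed to see an initialized value. A minimal sketch of the order-safe pattern (all names here are hypothetical):

#include <cstdint>

// Stand-in for the real OS query.
static std::uint32_t QueryPageSizeFromOS()
{
  return 16384;
}

// Function-local cache: the value is computed on demand, so the first caller
// always sees a valid result, even during static initialization.
static std::uint32_t GetRuntimePageSizeSketch()
{
  static std::uint32_t cached = 0;
  if (cached != 0)
    return cached;
  cached = QueryPageSizeFromOS();
  return cached;
}

// A dynamically initialized constant can therefore use the function safely.
static const bool s_default_to_lut = GetRuntimePageSizeSketch() > 4096;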

View File

@@ -505,7 +505,9 @@ struct Settings
#endif
// LUT still ends up faster on Apple Silicon for now, because of 16K pages.
#if defined(ENABLE_MMAP_FASTMEM) && (!defined(__APPLE__) || !defined(__aarch64__))
#ifdef DYNAMIC_HOST_PAGE_SIZE
static const CPUFastmemMode DEFAULT_CPU_FASTMEM_MODE;
#elif defined(ENABLE_MMAP_FASTMEM) && (!defined(__APPLE__) || !defined(__aarch64__))
static constexpr CPUFastmemMode DEFAULT_CPU_FASTMEM_MODE = CPUFastmemMode::MMap;
#else
static constexpr CPUFastmemMode DEFAULT_CPU_FASTMEM_MODE = CPUFastmemMode::LUT;

View File

@@ -55,6 +55,7 @@
#include "common/file_system.h"
#include "common/layered_settings_interface.h"
#include "common/log.h"
#include "common/memmap.h"
#include "common/path.h"
#include "common/string_util.h"
#include "common/threading.h"
@@ -373,8 +374,9 @@ bool System::Internal::PerformEarlyHardwareChecks(Error* error)
#endif
#endif
#ifndef DYNAMIC_HOST_PAGE_SIZE
// Check page size. If it doesn't match, it is a fatal error.
const size_t runtime_host_page_size = PlatformMisc::GetRuntimePageSize();
const size_t runtime_host_page_size = MemMap::GetRuntimePageSize();
if (runtime_host_page_size == 0)
{
Error::SetStringFmt(error, "Cannot determine size of page. Continuing with expectation of {} byte pages.",
@@ -388,6 +390,15 @@ bool System::Internal::PerformEarlyHardwareChecks(Error* error)
CPUThreadShutdown();
return false;
}
#else
if (HOST_PAGE_SIZE == 0 || HOST_PAGE_SIZE < MIN_HOST_PAGE_SIZE || HOST_PAGE_SIZE > MAX_HOST_PAGE_SIZE)
{
Error::SetStringFmt(error, "Page size of {} bytes is out of the range supported by this build: {}-{}.",
HOST_PAGE_SIZE, MIN_HOST_PAGE_SIZE, MAX_HOST_PAGE_SIZE);
CPUThreadShutdown();
return false;
}
#endif
return true;
}
@@ -451,6 +462,10 @@ void System::LogStartupInformation()
INFO_LOG("CPU has {} logical processor(s) and {} core(s) across {} cluster(s).", package->processor_count,
package->core_count, package->cluster_count);
}
#ifdef DYNAMIC_HOST_PAGE_SIZE
INFO_LOG("Host Page Size: {} bytes", HOST_PAGE_SIZE);
#endif
}
bool System::Internal::ProcessStartup(Error* error)

View File

@@ -606,8 +606,8 @@ void DebuggerWindow::setMemoryViewRegion(Bus::MemoryRegion region)
if (offset > Bus::g_ram_size)
return;
const u32 start_page = static_cast<u32>(offset) / HOST_PAGE_SIZE;
const u32 end_page = static_cast<u32>(offset + count - 1) / HOST_PAGE_SIZE;
const u32 start_page = static_cast<u32>(offset) >> HOST_PAGE_SHIFT;
const u32 end_page = static_cast<u32>(offset + count - 1) >> HOST_PAGE_SHIFT;
for (u32 i = start_page; i <= end_page; i++)
{
if (Bus::g_ram_code_bits[i])

View File

@@ -98,6 +98,7 @@ bool RegTestHost::InitializeConfig()
// default settings for runner
SettingsInterface& si = *s_base_settings_interface.get();
g_settings.Load(si, si);
g_settings.Save(si, false);
si.SetStringValue("GPU", "Renderer", Settings::GetRendererName(GPURenderer::Software));
si.SetBoolValue("GPU", "DisableShaderCache", true);

View File

@@ -12,9 +12,6 @@ bool InitializeSocketSupport(Error* error);
void SuspendScreensaver();
void ResumeScreensaver();
/// Returns the size of pages for the current host.
size_t GetRuntimePageSize();
/// Abstracts platform-specific code for asynchronously playing a sound.
/// On Windows, this will use PlaySound(). On Linux, it will shell out to aplay. On MacOS, it uses NSSound.
bool PlaySoundAsync(const char* path);

View File

@@ -7,7 +7,6 @@
#include <QuartzCore/QuartzCore.h>
#include <cinttypes>
#include <optional>
#include <sys/sysctl.h>
#include <vector>
#include "metal_layer.h"
@@ -78,22 +77,6 @@ void PlatformMisc::ResumeScreensaver()
s_screensaver_suspended = false;
}
template<typename T>
static std::optional<T> sysctlbyname(const char* name)
{
T output = 0;
size_t output_size = sizeof(output);
if (sysctlbyname(name, &output, &output_size, nullptr, 0) != 0)
return std::nullopt;
return output;
}
size_t PlatformMisc::GetRuntimePageSize()
{
return sysctlbyname<u32>("hw.pagesize").value_or(0);
}
bool PlatformMisc::PlaySoundAsync(const char* path)
{
NSString* nspath = [[NSString alloc] initWithUTF8String:path];

View File

@@ -135,12 +135,6 @@ void PlatformMisc::ResumeScreensaver()
s_screensaver_suspended = false;
}
size_t PlatformMisc::GetRuntimePageSize()
{
int res = sysconf(_SC_PAGESIZE);
return (res > 0) ? static_cast<size_t>(res) : 0;
}
bool PlatformMisc::PlaySoundAsync(const char* path)
{
#ifdef __linux__

View File

@@ -78,13 +78,6 @@ void PlatformMisc::ResumeScreensaver()
s_screensaver_suspended = false;
}
size_t PlatformMisc::GetRuntimePageSize()
{
SYSTEM_INFO si = {};
GetSystemInfo(&si);
return si.dwPageSize;
}
bool PlatformMisc::PlaySoundAsync(const char* path)
{
const std::wstring wpath(FileSystem::GetWin32Path(path));

View File

@@ -798,7 +798,7 @@ void VulkanDevice::ProcessDeviceExtensions()
// vk_ext_external_memory_host is only used if the import alignment is no larger than the system's page size
m_optional_extensions.vk_ext_external_memory_host &=
(external_memory_host_properties.minImportedHostPointerAlignment == HOST_PAGE_SIZE);
(external_memory_host_properties.minImportedHostPointerAlignment <= HOST_PAGE_SIZE);
#define LOG_EXT(name, field) INFO_LOG(name " is {}", m_optional_extensions.field ? "supported" : "NOT supported")
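
A note on the relaxed check above: an address or size that is a multiple of the host page size is automatically a multiple of any smaller power-of-two alignment, so page-aligned host allocations still satisfy a driver whose minImportedHostPointerAlignment is below the page size, which is why <= replaces ==. A quick numeric illustration (the values are assumed for the sketch):

#include <cstdint>

int main()
{
  const std::uint64_t min_import_alignment = 4096;  // e.g. what a driver might report
  const std::uint64_t host_page_size       = 16384; // e.g. a 16 KiB-page ARM64 host

  // Any multiple of 16384 is also a multiple of 4096, so a page-aligned host
  // pointer (or page-sized region) still meets the driver's smaller requirement.
  const std::uint64_t page_aligned_address = 7 * host_page_size;
  const bool ok = (min_import_alignment <= host_page_size) &&
                  (page_aligned_address % min_import_alignment == 0);
  return ok ? 0 : 1;
}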