Rewrite host GPU abstraction

- Avoid repeating the same code for four renderers.
- Add a native Metal renderer.
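
For context, the shared abstraction this rewrite introduces can be sketched roughly as below. The class and method names are illustrative only and not taken from the commit; the point is that each backend (D3D11, D3D12, Vulkan, OpenGL, and now Metal) implements one common device interface instead of duplicating texture, shader, and buffer plumbing.

// Illustrative sketch only -- not the actual DuckStation interface.
#include <cstdint>
#include <memory>
#include <string_view>

class ExampleGPUTexture
{
public:
  virtual ~ExampleGPUTexture() = default;
  virtual uint32_t GetWidth() const = 0;
  virtual uint32_t GetHeight() const = 0;
};

class ExampleGPUDevice
{
public:
  virtual ~ExampleGPUDevice() = default;

  // Implemented once per backend (D3D11, D3D12, Vulkan, OpenGL, Metal).
  virtual std::unique_ptr<ExampleGPUTexture> CreateTexture(uint32_t width, uint32_t height) = 0;
  virtual bool CompileShader(std::string_view source) = 0;
  virtual void Present() = 0;
};
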
Stenzek 2023-08-13 13:42:02 +10:00
parent bfa792ddbf
commit e3d9ba4c99
249 changed files with 28851 additions and 32222 deletions

View File

@ -10,7 +10,114 @@ AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: Inline
AllowShortFunctionsOnASingleLine: InlineOnly
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: true
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterCaseLabel: true
AfterClass: true
AfterControlStatement: true
AfterEnum: true
AfterFunction: true
AfterNamespace: false
AfterObjCDeclaration: true
AfterStruct: true
AfterUnion: true
BeforeCatch: true
BeforeElse: true
IndentBraces: false
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Custom
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 120
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 2
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeCategories:
- Regex: '^"(llvm|llvm-c|clang|clang-c)/'
Priority: 2
- Regex: '^(<|"(gtest|gmock|isl|json)/)'
Priority: 3
- Regex: '.*'
Priority: 1
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: true
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 2
UseTab: Never
...
---
Language: ObjC
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Right
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: InlineOnly
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None

View File

@ -23,7 +23,7 @@ elseif(${CMAKE_SYSTEM_NAME} STREQUAL "FreeBSD")
endif()
# Set minimum OS version for macOS. 10.14 should work.
set(CMAKE_OSX_DEPLOYMENT_TARGET "10.14.0" CACHE STRING "")
set(CMAKE_OSX_DEPLOYMENT_TARGET "11.0" CACHE STRING "")
# Global options.
if(NOT ANDROID)
@ -46,13 +46,7 @@ endif()
if(SUPPORTS_WAYLAND)
option(USE_WAYLAND "Support Wayland window system" ON)
endif()
if((LINUX OR FREEBSD) OR ANDROID)
option(USE_EGL "Support EGL OpenGL context creation" ON)
endif()
if((LINUX OR FREEBSD) AND NOT ANDROID)
option(USE_DRMKMS "Support DRM/KMS OpenGL contexts" OFF)
option(USE_FBDEV "Support FBDev OpenGL contexts" OFF)
option(USE_EVDEV "Support EVDev controller interface" OFF)
option(USE_DBUS "Enable DBus support for screensaver inhibiting" ON)
endif()
@ -122,21 +116,6 @@ endif()
if(USE_WAYLAND)
message(STATUS "Wayland support enabled")
endif()
if(USE_DRMKMS AND USE_FBDEV)
message(FATAL_ERROR "Only one of DRM/KMS and FBDev can be enabled")
endif()
if(USE_DRMKMS)
find_package(GBM REQUIRED)
find_package(Libdrm REQUIRED)
message(STATUS "DRM/KMS support enabled")
endif()
if(USE_FBDEV)
message(STATUS "FBDev Support enabled")
endif()
if(USE_EVDEV)
message(STATUS "EVDev Support enabled")
find_package(LIBEVDEV REQUIRED)
endif()
if(ENABLE_CHEEVOS)
message(STATUS "RetroAchievements support enabled")
endif()

View File

@ -1,70 +0,0 @@
# https://fossies.org/linux/misc/xbmc-18.9-Leia.tar.gz/xbmc-18.9-Leia/cmake/modules/FindGBM.cmake?m=t
# FindGBM
# ----------
# Finds the GBM library
#
# This will define the following variables::
#
# GBM_FOUND - system has GBM
# GBM_INCLUDE_DIRS - the GBM include directory
# GBM_LIBRARIES - the GBM libraries
# GBM_DEFINITIONS - the GBM definitions
#
# and the following imported targets::
#
# GBM::GBM - The GBM library
if(PKG_CONFIG_FOUND)
pkg_check_modules(PC_GBM gbm QUIET)
endif()
find_path(GBM_INCLUDE_DIR NAMES gbm.h
PATHS ${PC_GBM_INCLUDEDIR})
find_library(GBM_LIBRARY NAMES gbm
PATHS ${PC_GBM_LIBDIR})
set(GBM_VERSION ${PC_GBM_VERSION})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GBM
REQUIRED_VARS GBM_LIBRARY GBM_INCLUDE_DIR
VERSION_VAR GBM_VERSION)
include(CheckCSourceCompiles)
set(CMAKE_REQUIRED_LIBRARIES ${GBM_LIBRARY})
check_c_source_compiles("#include <gbm.h>
int main()
{
gbm_bo_map(NULL, 0, 0, 0, 0, GBM_BO_TRANSFER_WRITE, NULL, NULL);
}
" GBM_HAS_BO_MAP)
check_c_source_compiles("#include <gbm.h>
int main()
{
gbm_surface_create_with_modifiers(NULL, 0, 0, 0, NULL, 0);
}
" GBM_HAS_MODIFIERS)
if(GBM_FOUND)
set(GBM_LIBRARIES ${GBM_LIBRARY})
set(GBM_INCLUDE_DIRS ${GBM_INCLUDE_DIR})
set(GBM_DEFINITIONS -DHAVE_GBM=1)
if(GBM_HAS_BO_MAP)
list(APPEND GBM_DEFINITIONS -DHAS_GBM_BO_MAP=1)
endif()
if(GBM_HAS_MODIFIERS)
list(APPEND GBM_DEFINITIONS -DHAS_GBM_MODIFIERS=1)
endif()
if(NOT TARGET GBM::GBM)
add_library(GBM::GBM UNKNOWN IMPORTED)
set_target_properties(GBM::GBM PROPERTIES
IMPORTED_LOCATION "${GBM_LIBRARY}"
INTERFACE_INCLUDE_DIRECTORIES "${GBM_INCLUDE_DIR}")
endif()
endif()
mark_as_advanced(GBM_INCLUDE_DIR GBM_LIBRARY)

View File

@ -1,34 +0,0 @@
# - Try to find libevdev
# Once done this will define
# LIBEVDEV_FOUND - System has libevdev
# LIBEVDEV_INCLUDE_DIRS - The libevdev include directories
# LIBEVDEV_LIBRARIES - The libraries needed to use libevdev
find_package(PkgConfig)
pkg_check_modules(PC_LIBEVDEV QUIET libevdev)
FIND_PATH(
LIBEVDEV_INCLUDE_DIR libevdev/libevdev.h
HINTS ${PC_LIBEVDEV_INCLUDEDIR} ${PC_LIBEVDEV_INCLUDE_DIRS}
/usr/include
/usr/local/include
/usr/local/include/libevdev-1.0
${LIBEVDEV_PATH_INCLUDES}
)
FIND_LIBRARY(
LIBEVDEV_LIBRARY
NAMES evdev libevdev
HINTS ${PC_LIBEVDEV_LIBDIR} ${PC_LIBEVDEV_LIBRARY_DIRS}
PATHS ${ADDITIONAL_LIBRARY_PATHS}
${LIBEVDEV_PATH_LIB}
)
set(LIBEVDEV_LIBRARIES ${LIBEVDEV_LIBRARY} )
set(LIBEVDEV_INCLUDE_DIRS ${LIBEVDEV_INCLUDE_DIR} )
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LIBEVDEV DEFAULT_MSG
LIBEVDEV_LIBRARY LIBEVDEV_INCLUDE_DIR)
mark_as_advanced(LIBEVDEV_INCLUDE_DIR LIBEVDEV_LIBRARY )

View File

@ -1,107 +0,0 @@
# https://raw.githubusercontent.com/KDE/kwin/master/cmake/modules/FindLibdrm.cmake
#.rst:
# FindLibdrm
# -------
#
# Try to find libdrm on a Unix system.
#
# This will define the following variables:
#
# ``Libdrm_FOUND``
# True if (the requested version of) libdrm is available
# ``Libdrm_VERSION``
# The version of libdrm
# ``Libdrm_LIBRARIES``
# This can be passed to target_link_libraries() instead of the ``Libdrm::Libdrm``
# target
# ``Libdrm_INCLUDE_DIRS``
# This should be passed to target_include_directories() if the target is not
# used for linking
# ``Libdrm_DEFINITIONS``
# This should be passed to target_compile_options() if the target is not
# used for linking
#
# If ``Libdrm_FOUND`` is TRUE, it will also define the following imported target:
#
# ``Libdrm::Libdrm``
# The libdrm library
#
# In general we recommend using the imported target, as it is easier to use.
# Bear in mind, however, that if the target is in the link interface of an
# exported library, it must be made available by the package config file.
#=============================================================================
# SPDX-FileCopyrightText: 2014 Alex Merry <alex.merry@kde.org>
# SPDX-FileCopyrightText: 2014 Martin Gräßlin <mgraesslin@kde.org>
#
# SPDX-License-Identifier: BSD-3-Clause
#=============================================================================
if(CMAKE_VERSION VERSION_LESS 2.8.12)
message(FATAL_ERROR "CMake 2.8.12 is required by FindLibdrm.cmake")
endif()
if(CMAKE_MINIMUM_REQUIRED_VERSION VERSION_LESS 2.8.12)
message(AUTHOR_WARNING "Your project should require at least CMake 2.8.12 to use FindLibdrm.cmake")
endif()
if(NOT WIN32)
# Use pkg-config to get the directories and then use these values
# in the FIND_PATH() and FIND_LIBRARY() calls
find_package(PkgConfig)
pkg_check_modules(PKG_Libdrm QUIET libdrm)
set(Libdrm_DEFINITIONS ${PKG_Libdrm_CFLAGS_OTHER})
set(Libdrm_VERSION ${PKG_Libdrm_VERSION})
find_path(Libdrm_INCLUDE_DIR
NAMES
xf86drm.h
HINTS
${PKG_Libdrm_INCLUDE_DIRS}
)
find_library(Libdrm_LIBRARY
NAMES
drm
HINTS
${PKG_Libdrm_LIBRARY_DIRS}
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Libdrm
FOUND_VAR
Libdrm_FOUND
REQUIRED_VARS
Libdrm_LIBRARY
Libdrm_INCLUDE_DIR
VERSION_VAR
Libdrm_VERSION
)
if(Libdrm_FOUND AND NOT TARGET Libdrm::Libdrm)
add_library(Libdrm::Libdrm UNKNOWN IMPORTED)
set_target_properties(Libdrm::Libdrm PROPERTIES
IMPORTED_LOCATION "${Libdrm_LIBRARY}"
INTERFACE_COMPILE_OPTIONS "${Libdrm_DEFINITIONS}"
INTERFACE_INCLUDE_DIRECTORIES "${Libdrm_INCLUDE_DIR}"
INTERFACE_INCLUDE_DIRECTORIES "${Libdrm_INCLUDE_DIR}/libdrm"
)
endif()
mark_as_advanced(Libdrm_LIBRARY Libdrm_INCLUDE_DIR)
# compatibility variables
set(Libdrm_LIBRARIES ${Libdrm_LIBRARY})
set(Libdrm_INCLUDE_DIRS ${Libdrm_INCLUDE_DIR} "${Libdrm_INCLUDE_DIR}/libdrm")
set(Libdrm_VERSION_STRING ${Libdrm_VERSION})
else()
message(STATUS "FindLibdrm.cmake cannot find libdrm on Windows systems.")
set(Libdrm_FOUND FALSE)
endif()
include(FeatureSummary)
set_package_properties(Libdrm PROPERTIES
URL "https://wiki.freedesktop.org/dri/"
DESCRIPTION "Userspace interface to kernel DRM services."
)

View File

@ -17,8 +17,6 @@ add_library(common
fifo_queue.h
file_system.cpp
file_system.h
gpu_texture.cpp
gpu_texture.h
image.cpp
image.h
hash_combine.h
@ -57,8 +55,6 @@ add_library(common
timer.cpp
timer.h
types.h
window_info.cpp
window_info.h
)
target_include_directories(common PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/..")
@ -68,28 +64,6 @@ target_link_libraries(common PRIVATE stb libchdr zlib minizip Zstd::Zstd "${CMAK
if(WIN32)
target_sources(common PRIVATE
d3d12/context.cpp
d3d12/context.h
d3d12/descriptor_heap_manager.cpp
d3d12/descriptor_heap_manager.h
d3d12/shader_cache.cpp
d3d12/shader_cache.h
d3d12/staging_texture.cpp
d3d12/staging_texture.h
d3d12/stream_buffer.cpp
d3d12/stream_buffer.h
d3d12/texture.cpp
d3d12/texture.h
d3d12/util.cpp
d3d12/util.h
d3d11/shader_cache.cpp
d3d11/shader_cache.h
d3d11/shader_compiler.cpp
d3d11/shader_compiler.h
d3d11/stream_buffer.cpp
d3d11/stream_buffer.h
d3d11/texture.cpp
d3d11/texture.h
http_downloader_winhttp.cpp
http_downloader_winhttp.h
thirdparty/StackWalker.cpp
@ -123,147 +97,6 @@ if(ANDROID)
target_link_libraries(common PRIVATE log)
endif()
if(USE_X11)
target_sources(common PRIVATE
gl/x11_window.cpp
gl/x11_window.h
)
target_compile_definitions(common PRIVATE "-DUSE_X11=1")
target_include_directories(common PRIVATE "${X11_INCLUDE_DIR}" "${X11_Xrandr_INCLUDE_PATH}")
target_link_libraries(common PRIVATE "${X11_LIBRARIES}" "${X11_Xrandr_LIB}")
endif()
if(USE_WAYLAND)
target_compile_definitions(common PRIVATE "-DUSE_WAYLAND=1")
elseif(SUPPORTS_WAYLAND)
message(WARNING "Wayland support for renderers is disabled.\nDuckStation will FAIL to start on Wayland.")
endif()
if(USE_DRMKMS)
target_sources(common PRIVATE
drm_display.cpp
drm_display.h
)
target_link_libraries(common PUBLIC Libdrm::Libdrm)
endif()
if(ENABLE_OPENGL)
target_sources(common PRIVATE
gl/context.cpp
gl/context.h
gl/program.cpp
gl/program.h
gl/shader_cache.cpp
gl/shader_cache.h
gl/stream_buffer.cpp
gl/stream_buffer.h
gl/texture.cpp
gl/texture.h
)
target_compile_definitions(common PUBLIC "WITH_OPENGL=1")
target_link_libraries(common PRIVATE glad)
if(WIN32)
target_sources(common PRIVATE
gl/context_wgl.cpp
gl/context_wgl.h
)
endif()
if(USE_EGL)
target_sources(common PRIVATE
gl/context_egl.cpp
gl/context_egl.h
)
target_compile_definitions(common PRIVATE "-DUSE_EGL=1")
if(USE_X11)
target_sources(common PRIVATE
gl/context_egl_x11.cpp
gl/context_egl_x11.h
)
# We set EGL_NO_X11 because otherwise X comes in with its macros and breaks
# a bunch of files from compiling, if we include the EGL headers. This just
# makes the data types opaque, we can still use it with X11 if needed.
target_compile_definitions(common PRIVATE "-DEGL_NO_X11=1")
endif()
if(ANDROID AND USE_EGL)
target_sources(common PRIVATE
gl/context_egl_android.cpp
gl/context_egl_android.h
)
endif()
if(USE_DRMKMS)
target_compile_definitions(common PRIVATE "-DUSE_GBM=1")
target_sources(common PRIVATE
gl/context_egl_gbm.cpp
gl/context_egl_gbm.h
)
target_link_libraries(common PUBLIC GBM::GBM)
endif()
if(USE_FBDEV)
target_compile_definitions(common PRIVATE "-DUSE_FBDEV=1")
target_sources(common PRIVATE
gl/context_egl_fbdev.cpp
gl/context_egl_fbdev.h
)
endif()
endif()
if(USE_X11)
target_sources(common PRIVATE
gl/context_glx.cpp
gl/context_glx.h
)
target_compile_definitions(common PRIVATE "-DUSE_GLX=1")
endif()
if(USE_WAYLAND)
target_sources(common PRIVATE
gl/context_egl_wayland.cpp
gl/context_egl_wayland.h
)
endif()
if(APPLE)
target_sources(common PRIVATE
gl/context_agl.mm
gl/context_agl.h
)
endif()
endif()
if(ENABLE_VULKAN)
target_sources(common PRIVATE
vulkan/builders.cpp
vulkan/builders.h
vulkan/context.cpp
vulkan/context.h
vulkan/loader.h
vulkan/loader.cpp
vulkan/shader_cache.cpp
vulkan/shader_cache.h
vulkan/shader_compiler.cpp
vulkan/shader_compiler.h
vulkan/stream_buffer.cpp
vulkan/stream_buffer.h
vulkan/swap_chain.cpp
vulkan/swap_chain.h
vulkan/texture.cpp
vulkan/texture.h
vulkan/util.cpp
vulkan/util.h
)
target_compile_definitions(common PUBLIC "WITH_VULKAN=1")
target_link_libraries(common PRIVATE glslang)
if(APPLE)
# Needed for Vulkan Swap Chain.
target_link_libraries(common PRIVATE "objc")
endif()
endif()
if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
# We need -lrt for shm_unlink
target_link_libraries(common PRIVATE rt)

View File

@ -3,6 +3,14 @@
#pragma once
#include "types.h"
#include <cstdlib>
#ifdef _MSC_VER
#include <malloc.h>
#endif
namespace Common {
template<typename T>
constexpr bool IsAligned(T value, unsigned int alignment)
@ -52,4 +60,30 @@ constexpr T PreviousPow2(T value)
value |= (value >> 16);
return value - (value >> 1);
}
ALWAYS_INLINE static void* AlignedMalloc(size_t size, size_t alignment)
{
#ifdef _MSC_VER
return _aligned_malloc(size, alignment);
#else
// Unaligned sizes are slow on macOS.
#ifdef __APPLE__
if (IsPow2(alignment))
size = (size + alignment - 1) & ~(alignment - 1);
#endif
void* ret = nullptr;
posix_memalign(&ret, alignment, size);
return ret;
#endif
}
ALWAYS_INLINE static void AlignedFree(void* ptr)
{
#ifdef _MSC_VER
_aligned_free(ptr);
#else
free(ptr);
#endif
}
} // namespace Common
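
For reference, a minimal hypothetical caller of the aligned-allocation helpers added above (the 4 KiB size and 64-byte alignment are example values):

#include <cstring>

void ExampleAlignedBuffer()
{
  // Allocate 4 KiB aligned to a 64-byte boundary, then release it with the
  // matching helper so the MSVC (_aligned_malloc) and POSIX (posix_memalign)
  // paths stay correctly paired.
  void* buffer = Common::AlignedMalloc(4096, 64);
  if (buffer)
  {
    std::memset(buffer, 0, 4096);
    Common::AlignedFree(buffer);
  }
}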

View File

@ -2,15 +2,13 @@
<Project ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemDefinitionGroup>
<ClCompile>
<PreprocessorDefinitions Condition="'$(Platform)'!='ARM64'">WITH_OPENGL=1;WITH_VULKAN=1;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories Condition="'$(Platform)'!='ARM64'">$(SolutionDir)dep\glad\include;$(SolutionDir)dep\vulkan\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)src;%(AdditionalIncludeDirectories);$(SolutionDir)dep\gsl\include;$(SolutionDir)dep\fast_float\include;$(SolutionDir)dep\fmt\include;$(SolutionDir)dep\stb\include;$(SolutionDir)dep\glslang;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include</AdditionalIncludeDirectories>
<AdditionalIncludeDirectories>$(SolutionDir)src;%(AdditionalIncludeDirectories);$(SolutionDir)dep\gsl\include;$(SolutionDir)dep\fast_float\include;$(SolutionDir)dep\fmt\include;$(SolutionDir)dep\zlib\include;$(SolutionDir)dep\minizip\include;$(SolutionDir)dep\stb\include</AdditionalIncludeDirectories>
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup>
<Link>
<AdditionalDependencies>d3dcompiler.lib;d3d11.lib;%(AdditionalDependencies)</AdditionalDependencies>
<AdditionalDependencies>%(AdditionalDependencies);Comctl32.lib;winhttp.lib</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
</Project>

View File

@ -9,45 +9,12 @@
<ClInclude Include="build_timestamp.h" />
<ClInclude Include="byte_stream.h" />
<ClInclude Include="crash_handler.h" />
<ClInclude Include="d3d11\shader_cache.h" />
<ClInclude Include="d3d11\shader_compiler.h" />
<ClInclude Include="d3d11\stream_buffer.h" />
<ClInclude Include="d3d11\texture.h" />
<ClInclude Include="d3d12\context.h" />
<ClInclude Include="d3d12\descriptor_heap_manager.h" />
<ClInclude Include="d3d12\shader_cache.h" />
<ClInclude Include="d3d12\util.h" />
<ClInclude Include="d3d12\staging_texture.h" />
<ClInclude Include="d3d12\stream_buffer.h" />
<ClInclude Include="d3d12\texture.h" />
<ClInclude Include="dimensional_array.h" />
<ClInclude Include="easing.h" />
<ClInclude Include="error.h" />
<ClInclude Include="fastjmp.h" />
<ClInclude Include="fifo_queue.h" />
<ClInclude Include="file_system.h" />
<ClInclude Include="gl\context.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="gl\context_wgl.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="gl\loader.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="gl\program.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="gl\shader_cache.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="gl\stream_buffer.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="gl\texture.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="gpu_texture.h" />
<ClInclude Include="hash_combine.h" />
<ClInclude Include="heap_array.h" />
<ClInclude Include="http_downloader.h" />
@ -74,77 +41,16 @@
<ClInclude Include="timer.h" />
<ClInclude Include="types.h" />
<ClInclude Include="minizip_helpers.h" />
<ClInclude Include="vulkan\builders.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="vulkan\context.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="vulkan\entry_points.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="vulkan\loader.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="vulkan\shader_cache.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="vulkan\shader_compiler.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="vulkan\stream_buffer.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="vulkan\swap_chain.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="vulkan\texture.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="vulkan\util.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="win32_progress_callback.h" />
<ClInclude Include="windows_headers.h" />
<ClInclude Include="window_info.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="assert.cpp" />
<ClCompile Include="byte_stream.cpp" />
<ClCompile Include="crash_handler.cpp" />
<ClCompile Include="d3d11\shader_cache.cpp" />
<ClCompile Include="d3d11\shader_compiler.cpp" />
<ClCompile Include="d3d11\stream_buffer.cpp" />
<ClCompile Include="d3d11\texture.cpp" />
<ClCompile Include="error.cpp" />
<ClCompile Include="d3d12\context.cpp" />
<ClCompile Include="d3d12\descriptor_heap_manager.cpp" />
<ClCompile Include="d3d12\shader_cache.cpp" />
<ClCompile Include="d3d12\staging_texture.cpp" />
<ClCompile Include="d3d12\stream_buffer.cpp" />
<ClCompile Include="d3d12\texture.cpp" />
<ClCompile Include="d3d12\util.cpp" />
<ClCompile Include="fastjmp.cpp" />
<ClCompile Include="file_system.cpp" />
<ClCompile Include="gl\context.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="gl\context_wgl.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="gl\program.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="gl\shader_cache.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="gl\stream_buffer.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="gl\texture.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="gpu_texture.cpp" />
<ClCompile Include="http_downloader.cpp" />
<ClCompile Include="http_downloader_winhttp.cpp" />
<ClCompile Include="image.cpp" />
@ -160,35 +66,7 @@
<ClCompile Include="thirdparty\StackWalker.cpp" />
<ClCompile Include="threading.cpp" />
<ClCompile Include="timer.cpp" />
<ClCompile Include="vulkan\builders.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="vulkan\context.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="vulkan\loader.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="vulkan\shader_cache.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="vulkan\shader_compiler.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="vulkan\stream_buffer.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="vulkan\swap_chain.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="vulkan\texture.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="vulkan\util.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="win32_progress_callback.cpp" />
<ClCompile Include="window_info.cpp" />
</ItemGroup>
<ItemGroup>
<Natvis Include="bitfield.natvis" />
@ -204,20 +82,11 @@
<PreprocessorDefinitions Condition="'$(Platform)'=='Win32'">_M_X86_32;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions Condition="'$(Platform)'=='x64'">_M_X86_64;%(PreprocessorDefinitions)</PreprocessorDefinitions>
</MASM>
<None Include="vulkan\entry_points.inl">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</None>
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\..\dep\fmt\fmt.vcxproj">
<Project>{8be398e6-b882-4248-9065-fecc8728e038}</Project>
</ProjectReference>
<ProjectReference Include="..\..\dep\glad\glad.vcxproj" Condition="'$(Platform)'!='ARM64'">
<Project>{43540154-9e1e-409c-834f-b84be5621388}</Project>
</ProjectReference>
<ProjectReference Include="..\..\dep\glslang\glslang.vcxproj" Condition="'$(Platform)'!='ARM64'">
<Project>{7f909e29-4808-4bd9-a60c-56c51a3aaec2}</Project>
</ProjectReference>
<ProjectReference Include="..\..\dep\minizip\minizip.vcxproj">
<Project>{8bda439c-6358-45fb-9994-2ff083babe06}</Project>
</ProjectReference>

View File

@ -5,24 +5,6 @@
<ClInclude Include="types.h" />
<ClInclude Include="fifo_queue.h" />
<ClInclude Include="heap_array.h" />
<ClInclude Include="gl\program.h">
<Filter>gl</Filter>
</ClInclude>
<ClInclude Include="gl\stream_buffer.h">
<Filter>gl</Filter>
</ClInclude>
<ClInclude Include="gl\texture.h">
<Filter>gl</Filter>
</ClInclude>
<ClInclude Include="d3d11\stream_buffer.h">
<Filter>d3d11</Filter>
</ClInclude>
<ClInclude Include="d3d11\texture.h">
<Filter>d3d11</Filter>
</ClInclude>
<ClInclude Include="d3d11\shader_compiler.h">
<Filter>d3d11</Filter>
</ClInclude>
<ClInclude Include="rectangle.h" />
<ClInclude Include="log.h" />
<ClInclude Include="string.h" />
@ -33,47 +15,10 @@
<ClInclude Include="file_system.h" />
<ClInclude Include="string_util.h" />
<ClInclude Include="md5_digest.h" />
<ClInclude Include="d3d11\shader_cache.h">
<Filter>d3d11</Filter>
</ClInclude>
<ClInclude Include="hash_combine.h" />
<ClInclude Include="progress_callback.h" />
<ClInclude Include="gl\shader_cache.h">
<Filter>gl</Filter>
</ClInclude>
<ClInclude Include="bitutils.h" />
<ClInclude Include="gl\context.h">
<Filter>gl</Filter>
</ClInclude>
<ClInclude Include="gl\context_wgl.h">
<Filter>gl</Filter>
</ClInclude>
<ClInclude Include="window_info.h" />
<ClInclude Include="vulkan\texture.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\stream_buffer.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\shader_compiler.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\util.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\swap_chain.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="dimensional_array.h" />
<ClInclude Include="vulkan\context.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\builders.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\shader_cache.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="image.h" />
<ClInclude Include="minizip_helpers.h" />
<ClInclude Include="win32_progress_callback.h" />
@ -86,40 +31,10 @@
<ClInclude Include="easing.h" />
<ClInclude Include="error.h" />
<ClInclude Include="platform.h" />
<ClInclude Include="d3d12\staging_texture.h">
<Filter>d3d12</Filter>
</ClInclude>
<ClInclude Include="d3d12\stream_buffer.h">
<Filter>d3d12</Filter>
</ClInclude>
<ClInclude Include="d3d12\texture.h">
<Filter>d3d12</Filter>
</ClInclude>
<ClInclude Include="d3d12\context.h">
<Filter>d3d12</Filter>
</ClInclude>
<ClInclude Include="d3d12\descriptor_heap_manager.h">
<Filter>d3d12</Filter>
</ClInclude>
<ClInclude Include="d3d12\util.h">
<Filter>d3d12</Filter>
</ClInclude>
<ClInclude Include="d3d12\shader_cache.h">
<Filter>d3d12</Filter>
</ClInclude>
<ClInclude Include="http_downloader_winhttp.h" />
<ClInclude Include="http_downloader.h" />
<ClInclude Include="vulkan\entry_points.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="vulkan\loader.h">
<Filter>vulkan</Filter>
</ClInclude>
<ClInclude Include="path.h" />
<ClInclude Include="windows_headers.h" />
<ClInclude Include="gl\loader.h">
<Filter>gl</Filter>
</ClInclude>
<ClInclude Include="settings_interface.h" />
<ClInclude Include="layered_settings_interface.h" />
<ClInclude Include="heterogeneous_containers.h" />
@ -128,28 +43,9 @@
<ClInclude Include="scoped_guard.h" />
<ClInclude Include="build_timestamp.h" />
<ClInclude Include="sha1_digest.h" />
<ClInclude Include="gpu_texture.h" />
<ClInclude Include="fastjmp.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="gl\program.cpp">
<Filter>gl</Filter>
</ClCompile>
<ClCompile Include="gl\stream_buffer.cpp">
<Filter>gl</Filter>
</ClCompile>
<ClCompile Include="gl\texture.cpp">
<Filter>gl</Filter>
</ClCompile>
<ClCompile Include="d3d11\texture.cpp">
<Filter>d3d11</Filter>
</ClCompile>
<ClCompile Include="d3d11\stream_buffer.cpp">
<Filter>d3d11</Filter>
</ClCompile>
<ClCompile Include="d3d11\shader_compiler.cpp">
<Filter>d3d11</Filter>
</ClCompile>
<ClCompile Include="string.cpp" />
<ClCompile Include="byte_stream.cpp" />
<ClCompile Include="log.cpp" />
@ -158,43 +54,7 @@
<ClCompile Include="file_system.cpp" />
<ClCompile Include="string_util.cpp" />
<ClCompile Include="md5_digest.cpp" />
<ClCompile Include="d3d11\shader_cache.cpp">
<Filter>d3d11</Filter>
</ClCompile>
<ClCompile Include="progress_callback.cpp" />
<ClCompile Include="gl\shader_cache.cpp">
<Filter>gl</Filter>
</ClCompile>
<ClCompile Include="gl\context_wgl.cpp">
<Filter>gl</Filter>
</ClCompile>
<ClCompile Include="gl\context.cpp">
<Filter>gl</Filter>
</ClCompile>
<ClCompile Include="vulkan\texture.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\context.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\stream_buffer.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\util.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\shader_compiler.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\swap_chain.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\builders.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="vulkan\shader_cache.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="image.cpp" />
<ClCompile Include="minizip_helpers.cpp" />
<ClCompile Include="win32_progress_callback.cpp" />
@ -203,64 +63,21 @@
</ClCompile>
<ClCompile Include="crash_handler.cpp" />
<ClCompile Include="error.cpp" />
<ClCompile Include="window_info.cpp" />
<ClCompile Include="d3d12\staging_texture.cpp">
<Filter>d3d12</Filter>
</ClCompile>
<ClCompile Include="d3d12\stream_buffer.cpp">
<Filter>d3d12</Filter>
</ClCompile>
<ClCompile Include="d3d12\texture.cpp">
<Filter>d3d12</Filter>
</ClCompile>
<ClCompile Include="d3d12\context.cpp">
<Filter>d3d12</Filter>
</ClCompile>
<ClCompile Include="d3d12\descriptor_heap_manager.cpp">
<Filter>d3d12</Filter>
</ClCompile>
<ClCompile Include="d3d12\util.cpp">
<Filter>d3d12</Filter>
</ClCompile>
<ClCompile Include="d3d12\shader_cache.cpp">
<Filter>d3d12</Filter>
</ClCompile>
<ClCompile Include="http_downloader_winhttp.cpp" />
<ClCompile Include="http_downloader.cpp" />
<ClCompile Include="vulkan\loader.cpp">
<Filter>vulkan</Filter>
</ClCompile>
<ClCompile Include="layered_settings_interface.cpp" />
<ClCompile Include="memory_settings_interface.cpp" />
<ClCompile Include="threading.cpp" />
<ClCompile Include="sha1_digest.cpp" />
<ClCompile Include="gpu_texture.cpp" />
<ClCompile Include="fastjmp.cpp" />
</ItemGroup>
<ItemGroup>
<Natvis Include="bitfield.natvis" />
</ItemGroup>
<ItemGroup>
<Filter Include="gl">
<UniqueIdentifier>{52487c57-753d-4888-ba26-ed63ab51a234}</UniqueIdentifier>
</Filter>
<Filter Include="d3d11">
<UniqueIdentifier>{30251086-81f3-44f5-add4-7ff9a24098ab}</UniqueIdentifier>
</Filter>
<Filter Include="vulkan">
<UniqueIdentifier>{642ff5eb-af39-4aab-a42f-6eb8188a11d7}</UniqueIdentifier>
</Filter>
<Filter Include="thirdparty">
<UniqueIdentifier>{fd4150b0-6f82-4251-ab23-34c25fbc5b5e}</UniqueIdentifier>
</Filter>
<Filter Include="d3d12">
<UniqueIdentifier>{358e11c4-34af-4169-9a66-ec66342a6a2f}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<None Include="vulkan\entry_points.inl">
<Filter>vulkan</Filter>
</None>
</ItemGroup>
<ItemGroup>
<MASM Include="fastjmp_x86.asm" />

View File

@ -1,307 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "shader_cache.h"
#include "../file_system.h"
#include "../log.h"
#include "../md5_digest.h"
#include "shader_compiler.h"
#include <d3dcompiler.h>
Log_SetChannel(D3D11::ShaderCache);
namespace D3D11 {
#pragma pack(push, 1)
struct CacheIndexEntry
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
u32 shader_type;
u32 file_offset;
u32 blob_size;
};
#pragma pack(pop)
ShaderCache::ShaderCache() = default;
ShaderCache::~ShaderCache()
{
if (m_index_file)
std::fclose(m_index_file);
if (m_blob_file)
std::fclose(m_blob_file);
}
bool ShaderCache::CacheIndexKey::operator==(const CacheIndexKey& key) const
{
return (source_hash_low == key.source_hash_low && source_hash_high == key.source_hash_high &&
source_length == key.source_length && shader_type == key.shader_type);
}
bool ShaderCache::CacheIndexKey::operator!=(const CacheIndexKey& key) const
{
return (source_hash_low != key.source_hash_low || source_hash_high != key.source_hash_high ||
source_length != key.source_length || shader_type != key.shader_type);
}
void ShaderCache::Open(std::string_view base_path, D3D_FEATURE_LEVEL feature_level, u32 version, bool debug)
{
m_feature_level = feature_level;
m_version = version;
m_debug = debug;
if (!base_path.empty())
{
const std::string base_filename = GetCacheBaseFileName(base_path, feature_level, debug);
const std::string index_filename = base_filename + ".idx";
const std::string blob_filename = base_filename + ".bin";
if (!ReadExisting(index_filename, blob_filename))
CreateNew(index_filename, blob_filename);
}
}
bool ShaderCache::CreateNew(const std::string& index_filename, const std::string& blob_filename)
{
if (FileSystem::FileExists(index_filename.c_str()))
{
Log_WarningPrintf("Removing existing index file '%s'", index_filename.c_str());
FileSystem::DeleteFile(index_filename.c_str());
}
if (FileSystem::FileExists(blob_filename.c_str()))
{
Log_WarningPrintf("Removing existing blob file '%s'", blob_filename.c_str());
FileSystem::DeleteFile(blob_filename.c_str());
}
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "wb");
if (!m_index_file)
{
Log_ErrorPrintf("Failed to open index file '%s' for writing", index_filename.c_str());
return false;
}
const u32 index_version = FILE_VERSION;
if (std::fwrite(&index_version, sizeof(index_version), 1, m_index_file) != 1 ||
std::fwrite(&m_version, sizeof(m_version), 1, m_index_file) != 1)
{
Log_ErrorPrintf("Failed to write version to index file '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFile(index_filename.c_str());
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "w+b");
if (!m_blob_file)
{
Log_ErrorPrintf("Failed to open blob file '%s' for writing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFile(index_filename.c_str());
return false;
}
return true;
}
bool ShaderCache::ReadExisting(const std::string& index_filename, const std::string& blob_filename)
{
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "r+b");
if (!m_index_file)
return false;
u32 file_version = 0;
u32 data_version = 0;
if (std::fread(&file_version, sizeof(file_version), 1, m_index_file) != 1 || file_version != FILE_VERSION ||
std::fread(&data_version, sizeof(data_version), 1, m_index_file) != 1 || data_version != m_version)
{
Log_ErrorPrintf("Bad file/data version in '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "a+b");
if (!m_blob_file)
{
Log_ErrorPrintf("Blob file '%s' is missing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
std::fseek(m_blob_file, 0, SEEK_END);
const u32 blob_file_size = static_cast<u32>(std::ftell(m_blob_file));
for (;;)
{
CacheIndexEntry entry;
if (std::fread(&entry, sizeof(entry), 1, m_index_file) != 1 ||
(entry.file_offset + entry.blob_size) > blob_file_size)
{
if (std::feof(m_index_file))
break;
Log_ErrorPrintf("Failed to read entry from '%s', corrupt file?", index_filename.c_str());
m_index.clear();
std::fclose(m_blob_file);
m_blob_file = nullptr;
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
const CacheIndexKey key{entry.source_hash_low, entry.source_hash_high, entry.source_length,
static_cast<ShaderCompiler::Type>(entry.shader_type)};
const CacheIndexData data{entry.file_offset, entry.blob_size};
m_index.emplace(key, data);
}
// ensure we don't write before seeking
std::fseek(m_index_file, 0, SEEK_END);
Log_InfoPrintf("Read %zu entries from '%s'", m_index.size(), index_filename.c_str());
return true;
}
std::string ShaderCache::GetCacheBaseFileName(const std::string_view& base_path, D3D_FEATURE_LEVEL feature_level,
bool debug)
{
std::string base_filename(base_path);
base_filename += FS_OSPATH_SEPARATOR_STR "d3d_shaders_";
switch (feature_level)
{
case D3D_FEATURE_LEVEL_10_0:
base_filename += "sm40";
break;
case D3D_FEATURE_LEVEL_10_1:
base_filename += "sm41";
break;
case D3D_FEATURE_LEVEL_11_0:
base_filename += "sm50";
break;
default:
base_filename += "unk";
break;
}
if (debug)
base_filename += "_debug";
return base_filename;
}
ShaderCache::CacheIndexKey ShaderCache::GetCacheKey(ShaderCompiler::Type type, const std::string_view& shader_code)
{
union
{
struct
{
u64 hash_low;
u64 hash_high;
};
u8 hash[16];
};
MD5Digest digest;
digest.Update(shader_code.data(), static_cast<u32>(shader_code.length()));
digest.Final(hash);
return CacheIndexKey{hash_low, hash_high, static_cast<u32>(shader_code.length()), type};
}
ShaderCache::ComPtr<ID3DBlob> ShaderCache::GetShaderBlob(ShaderCompiler::Type type, std::string_view shader_code)
{
const auto key = GetCacheKey(type, shader_code);
auto iter = m_index.find(key);
if (iter == m_index.end())
return CompileAndAddShaderBlob(key, shader_code);
ComPtr<ID3DBlob> blob;
HRESULT hr = D3DCreateBlob(iter->second.blob_size, blob.GetAddressOf());
if (FAILED(hr) || std::fseek(m_blob_file, iter->second.file_offset, SEEK_SET) != 0 ||
std::fread(blob->GetBufferPointer(), 1, iter->second.blob_size, m_blob_file) != iter->second.blob_size)
{
Log_ErrorPrintf("Read blob from file failed");
return {};
}
return blob;
}
ShaderCache::ComPtr<ID3D11VertexShader> ShaderCache::GetVertexShader(ID3D11Device* device, std::string_view shader_code)
{
ComPtr<ID3DBlob> blob = GetShaderBlob(ShaderCompiler::Type::Vertex, std::move(shader_code));
if (!blob)
return {};
return D3D11::ShaderCompiler::CreateVertexShader(device, blob.Get());
}
ShaderCache::ComPtr<ID3D11GeometryShader> ShaderCache::GetGeometryShader(ID3D11Device* device,
std::string_view shader_code)
{
ComPtr<ID3DBlob> blob = GetShaderBlob(ShaderCompiler::Type::Geometry, std::move(shader_code));
if (!blob)
return {};
return D3D11::ShaderCompiler::CreateGeometryShader(device, blob.Get());
}
ShaderCache::ComPtr<ID3D11PixelShader> ShaderCache::GetPixelShader(ID3D11Device* device, std::string_view shader_code)
{
ComPtr<ID3DBlob> blob = GetShaderBlob(ShaderCompiler::Type::Pixel, std::move(shader_code));
if (!blob)
return {};
return D3D11::ShaderCompiler::CreatePixelShader(device, blob.Get());
}
ShaderCache::ComPtr<ID3D11ComputeShader> ShaderCache::GetComputeShader(ID3D11Device* device,
std::string_view shader_code)
{
ComPtr<ID3DBlob> blob = GetShaderBlob(ShaderCompiler::Type::Compute, std::move(shader_code));
if (!blob)
return {};
return D3D11::ShaderCompiler::CreateComputeShader(device, blob.Get());
}
ShaderCache::ComPtr<ID3DBlob> ShaderCache::CompileAndAddShaderBlob(const CacheIndexKey& key,
std::string_view shader_code)
{
ComPtr<ID3DBlob> blob = ShaderCompiler::CompileShader(key.shader_type, m_feature_level, shader_code, m_debug);
if (!blob)
return {};
if (!m_blob_file || std::fseek(m_blob_file, 0, SEEK_END) != 0)
return blob;
CacheIndexData data;
data.file_offset = static_cast<u32>(std::ftell(m_blob_file));
data.blob_size = static_cast<u32>(blob->GetBufferSize());
CacheIndexEntry entry = {};
entry.source_hash_low = key.source_hash_low;
entry.source_hash_high = key.source_hash_high;
entry.source_length = key.source_length;
entry.shader_type = static_cast<u32>(key.shader_type);
entry.blob_size = data.blob_size;
entry.file_offset = data.file_offset;
if (std::fwrite(blob->GetBufferPointer(), 1, entry.blob_size, m_blob_file) != entry.blob_size ||
std::fflush(m_blob_file) != 0 || std::fwrite(&entry, sizeof(entry), 1, m_index_file) != 1 ||
std::fflush(m_index_file) != 0)
{
Log_ErrorPrintf("Failed to write shader blob to file");
return blob;
}
m_index.emplace(key, data);
return blob;
}
} // namespace D3D11

View File

@ -1,88 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../hash_combine.h"
#include "../types.h"
#include "../windows_headers.h"
#include "shader_compiler.h"
#include <cstdio>
#include <d3d11.h>
#include <string_view>
#include <unordered_map>
#include <vector>
#include <wrl/client.h>
namespace D3D11 {
class ShaderCache
{
public:
template<typename T>
using ComPtr = Microsoft::WRL::ComPtr<T>;
ShaderCache();
~ShaderCache();
void Open(std::string_view base_path, D3D_FEATURE_LEVEL feature_level, u32 version, bool debug);
ComPtr<ID3DBlob> GetShaderBlob(ShaderCompiler::Type type, std::string_view shader_code);
ComPtr<ID3D11VertexShader> GetVertexShader(ID3D11Device* device, std::string_view shader_code);
ComPtr<ID3D11GeometryShader> GetGeometryShader(ID3D11Device* device, std::string_view shader_code);
ComPtr<ID3D11PixelShader> GetPixelShader(ID3D11Device* device, std::string_view shader_code);
ComPtr<ID3D11ComputeShader> GetComputeShader(ID3D11Device* device, std::string_view shader_code);
private:
static constexpr u32 FILE_VERSION = 2;
struct CacheIndexKey
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
ShaderCompiler::Type shader_type;
bool operator==(const CacheIndexKey& key) const;
bool operator!=(const CacheIndexKey& key) const;
};
struct CacheIndexEntryHasher
{
std::size_t operator()(const CacheIndexKey& e) const noexcept
{
std::size_t h = 0;
hash_combine(h, e.source_hash_low, e.source_hash_high, e.source_length, e.shader_type);
return h;
}
};
struct CacheIndexData
{
u32 file_offset;
u32 blob_size;
};
using CacheIndex = std::unordered_map<CacheIndexKey, CacheIndexData, CacheIndexEntryHasher>;
static std::string GetCacheBaseFileName(const std::string_view& base_path, D3D_FEATURE_LEVEL feature_level,
bool debug);
static CacheIndexKey GetCacheKey(ShaderCompiler::Type type, const std::string_view& shader_code);
bool CreateNew(const std::string& index_filename, const std::string& blob_filename);
bool ReadExisting(const std::string& index_filename, const std::string& blob_filename);
void Close();
ComPtr<ID3DBlob> CompileAndAddShaderBlob(const CacheIndexKey& key, std::string_view shader_code);
std::FILE* m_index_file = nullptr;
std::FILE* m_blob_file = nullptr;
CacheIndex m_index;
D3D_FEATURE_LEVEL m_feature_level = D3D_FEATURE_LEVEL_11_0;
u32 m_version = 0;
bool m_debug = false;
};
} // namespace D3D11
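
A hypothetical caller of the (removed) D3D11 shader cache declared above; the cache directory, version number, and HLSL source are placeholders:

void ExampleUseShaderCache(ID3D11Device* device, std::string_view hlsl_source)
{
  D3D11::ShaderCache cache;
  // Opens or creates d3d_shaders_sm50.idx/.bin under the given directory;
  // a mismatched version value discards the existing cache files.
  cache.Open("cache_dir", D3D_FEATURE_LEVEL_11_0, /*version=*/1, /*debug=*/false);

  // Compiles and stores the blob on a cache miss, otherwise reads the
  // previously compiled bytecode back from the blob file.
  Microsoft::WRL::ComPtr<ID3D11VertexShader> vs = cache.GetVertexShader(device, hlsl_source);
}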

View File

@ -1,202 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "shader_compiler.h"
#include "../log.h"
#include "../string_util.h"
#include <array>
#include <d3dcompiler.h>
#include <fstream>
Log_SetChannel(D3D11);
namespace D3D11::ShaderCompiler {
static unsigned s_next_bad_shader_id = 1;
ComPtr<ID3DBlob> CompileShader(Type type, D3D_FEATURE_LEVEL feature_level, std::string_view code, bool debug)
{
const char* target;
switch (feature_level)
{
case D3D_FEATURE_LEVEL_10_0:
{
static constexpr std::array<const char*, 4> targets = {{"vs_4_0", "gs_4_0", "ps_4_0", "cs_4_0"}};
target = targets[static_cast<int>(type)];
}
break;
case D3D_FEATURE_LEVEL_10_1:
{
static constexpr std::array<const char*, 4> targets = {{"vs_4_1", "gs_4_1", "ps_4_1", "cs_4_1"}};
target = targets[static_cast<int>(type)];
}
break;
case D3D_FEATURE_LEVEL_11_0:
{
static constexpr std::array<const char*, 4> targets = {{"vs_5_0", "gs_5_0", "ps_5_0", "cs_5_0"}};
target = targets[static_cast<int>(type)];
}
break;
case D3D_FEATURE_LEVEL_11_1:
default:
{
static constexpr std::array<const char*, 4> targets = {{"vs_5_1", "gs_5_1", "ps_5_1", "cs_5_1"}};
target = targets[static_cast<int>(type)];
}
break;
}
static constexpr UINT flags_non_debug = D3DCOMPILE_OPTIMIZATION_LEVEL3;
static constexpr UINT flags_debug = D3DCOMPILE_SKIP_OPTIMIZATION | D3DCOMPILE_DEBUG;
ComPtr<ID3DBlob> blob;
ComPtr<ID3DBlob> error_blob;
const HRESULT hr =
D3DCompile(code.data(), code.size(), "0", nullptr, nullptr, "main", target, debug ? flags_debug : flags_non_debug,
0, blob.GetAddressOf(), error_blob.GetAddressOf());
std::string error_string;
if (error_blob)
{
error_string.append(static_cast<const char*>(error_blob->GetBufferPointer()), error_blob->GetBufferSize());
error_blob.Reset();
}
if (FAILED(hr))
{
Log_ErrorPrintf("Failed to compile '%s':\n%s", target, error_string.c_str());
std::ofstream ofs(StringUtil::StdStringFromFormat("bad_shader_%u.txt", s_next_bad_shader_id++).c_str(),
std::ofstream::out | std::ofstream::binary);
if (ofs.is_open())
{
ofs << code;
ofs << "\n\nCompile as " << target << " failed: " << hr << "\n";
ofs.write(error_string.c_str(), error_string.size());
ofs.close();
}
return {};
}
if (!error_string.empty())
Log_WarningPrintf("'%s' compiled with warnings:\n%s", target, error_string.c_str());
return blob;
}
ComPtr<ID3D11VertexShader> CompileAndCreateVertexShader(ID3D11Device* device, std::string_view code, bool debug)
{
ComPtr<ID3DBlob> blob = CompileShader(Type::Vertex, device->GetFeatureLevel(), std::move(code), debug);
if (!blob)
return {};
return CreateVertexShader(device, blob.Get());
}
ComPtr<ID3D11GeometryShader> CompileAndCreateGeometryShader(ID3D11Device* device, std::string_view code, bool debug)
{
ComPtr<ID3DBlob> blob = CompileShader(Type::Geometry, device->GetFeatureLevel(), std::move(code), debug);
if (!blob)
return {};
return CreateGeometryShader(device, blob.Get());
}
ComPtr<ID3D11PixelShader> CompileAndCreatePixelShader(ID3D11Device* device, std::string_view code, bool debug)
{
ComPtr<ID3DBlob> blob = CompileShader(Type::Pixel, device->GetFeatureLevel(), std::move(code), debug);
if (!blob)
return {};
return CreatePixelShader(device, blob.Get());
}
ComPtr<ID3D11ComputeShader> CompileAndCreateComputeShader(ID3D11Device* device, std::string_view code, bool debug)
{
ComPtr<ID3DBlob> blob = CompileShader(Type::Compute, device->GetFeatureLevel(), std::move(code), debug);
if (!blob)
return {};
return CreateComputeShader(device, blob.Get());
}
ComPtr<ID3D11VertexShader> CreateVertexShader(ID3D11Device* device, const void* bytecode, size_t bytecode_length)
{
ComPtr<ID3D11VertexShader> shader;
const HRESULT hr = device->CreateVertexShader(bytecode, bytecode_length, nullptr, shader.GetAddressOf());
if (FAILED(hr))
{
Log_ErrorPrintf("Failed to create vertex shader: 0x%08X", hr);
return {};
}
return shader;
}
ComPtr<ID3D11VertexShader> CreateVertexShader(ID3D11Device* device, const ID3DBlob* blob)
{
return CreateVertexShader(device, const_cast<ID3DBlob*>(blob)->GetBufferPointer(),
const_cast<ID3DBlob*>(blob)->GetBufferSize());
}
ComPtr<ID3D11GeometryShader> CreateGeometryShader(ID3D11Device* device, const void* bytecode, size_t bytecode_length)
{
ComPtr<ID3D11GeometryShader> shader;
const HRESULT hr = device->CreateGeometryShader(bytecode, bytecode_length, nullptr, shader.GetAddressOf());
if (FAILED(hr))
{
Log_ErrorPrintf("Failed to create geometry shader: 0x%08X", hr);
return {};
}
return shader;
}
ComPtr<ID3D11GeometryShader> CreateGeometryShader(ID3D11Device* device, const ID3DBlob* blob)
{
return CreateGeometryShader(device, const_cast<ID3DBlob*>(blob)->GetBufferPointer(),
const_cast<ID3DBlob*>(blob)->GetBufferSize());
}
ComPtr<ID3D11PixelShader> CreatePixelShader(ID3D11Device* device, const void* bytecode, size_t bytecode_length)
{
ComPtr<ID3D11PixelShader> shader;
const HRESULT hr = device->CreatePixelShader(bytecode, bytecode_length, nullptr, shader.GetAddressOf());
if (FAILED(hr))
{
Log_ErrorPrintf("Failed to create pixel shader: 0x%08X", hr);
return {};
}
return shader;
}
ComPtr<ID3D11PixelShader> CreatePixelShader(ID3D11Device* device, const ID3DBlob* blob)
{
return CreatePixelShader(device, const_cast<ID3DBlob*>(blob)->GetBufferPointer(),
const_cast<ID3DBlob*>(blob)->GetBufferSize());
}
ComPtr<ID3D11ComputeShader> CreateComputeShader(ID3D11Device* device, const void* bytecode, size_t bytecode_length)
{
ComPtr<ID3D11ComputeShader> shader;
const HRESULT hr = device->CreateComputeShader(bytecode, bytecode_length, nullptr, shader.GetAddressOf());
if (FAILED(hr))
{
Log_ErrorPrintf("Failed to create compute shader: 0x%08X", hr);
return {};
}
return shader;
}
ComPtr<ID3D11ComputeShader> CreateComputeShader(ID3D11Device* device, const ID3DBlob* blob)
{
return CreateComputeShader(device, const_cast<ID3DBlob*>(blob)->GetBufferPointer(),
const_cast<ID3DBlob*>(blob)->GetBufferSize());
}
} // namespace D3D11::ShaderCompiler

View File

@ -1,39 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../windows_headers.h"
#include <d3d11.h>
#include <string_view>
#include <type_traits>
#include <wrl/client.h>
namespace D3D11::ShaderCompiler {
template<typename T>
using ComPtr = Microsoft::WRL::ComPtr<T>;
enum class Type
{
Vertex,
Geometry,
Pixel,
Compute
};
ComPtr<ID3DBlob> CompileShader(Type type, D3D_FEATURE_LEVEL feature_level, std::string_view code, bool debug);
ComPtr<ID3D11VertexShader> CompileAndCreateVertexShader(ID3D11Device* device, std::string_view code, bool debug);
ComPtr<ID3D11GeometryShader> CompileAndCreateGeometryShader(ID3D11Device* device, std::string_view code, bool debug);
ComPtr<ID3D11PixelShader> CompileAndCreatePixelShader(ID3D11Device* device, std::string_view code, bool debug);
ComPtr<ID3D11ComputeShader> CompileAndCreateComputeShader(ID3D11Device* device, std::string_view code, bool debug);
ComPtr<ID3D11VertexShader> CreateVertexShader(ID3D11Device* device, const void* bytecode, size_t bytecode_length);
ComPtr<ID3D11VertexShader> CreateVertexShader(ID3D11Device* device, const ID3DBlob* blob);
ComPtr<ID3D11GeometryShader> CreateGeometryShader(ID3D11Device* device, const void* bytecode, size_t bytecode_length);
ComPtr<ID3D11GeometryShader> CreateGeometryShader(ID3D11Device* device, const ID3DBlob* blob);
ComPtr<ID3D11PixelShader> CreatePixelShader(ID3D11Device* device, const void* bytecode, size_t bytecode_length);
ComPtr<ID3D11PixelShader> CreatePixelShader(ID3D11Device* device, const ID3DBlob* blob);
ComPtr<ID3D11ComputeShader> CreateComputeShader(ID3D11Device* device, const void* bytecode, size_t bytecode_length);
ComPtr<ID3D11ComputeShader> CreateComputeShader(ID3D11Device* device, const ID3DBlob* blob);
}; // namespace D3D11::ShaderCompiler
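
Similarly, a hypothetical direct use of the compiler helpers above, bypassing the cache (on failure they log the error and return a null ComPtr):

void ExampleCompilePixelShader(ID3D11Device* device, std::string_view hlsl_source)
{
  // Compiles for the device's feature level and creates the shader object in one call.
  Microsoft::WRL::ComPtr<ID3D11PixelShader> ps =
    D3D11::ShaderCompiler::CompileAndCreatePixelShader(device, hlsl_source, /*debug=*/false);
  if (!ps)
    return;
}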

View File

@ -1,187 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "texture.h"
#include "../log.h"
#include <array>
Log_SetChannel(D3D11);
static constexpr std::array<DXGI_FORMAT, static_cast<u32>(GPUTexture::Format::Count)> s_dxgi_mapping = {
{DXGI_FORMAT_UNKNOWN, DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B5G6R5_UNORM,
DXGI_FORMAT_B5G5R5A1_UNORM, DXGI_FORMAT_R8_UNORM, DXGI_FORMAT_D16_UNORM}};
D3D11::Texture::Texture() = default;
D3D11::Texture::Texture(ComPtr<ID3D11Texture2D> texture, ComPtr<ID3D11ShaderResourceView> srv,
ComPtr<ID3D11RenderTargetView> rtv)
: m_texture(std::move(texture)), m_srv(std::move(srv)), m_rtv(std::move(rtv))
{
const D3D11_TEXTURE2D_DESC desc = GetDesc();
m_width = static_cast<u16>(desc.Width);
m_height = static_cast<u16>(desc.Height);
m_layers = static_cast<u8>(desc.ArraySize);
m_levels = static_cast<u8>(desc.MipLevels);
m_samples = static_cast<u8>(desc.SampleDesc.Count);
m_format = LookupBaseFormat(desc.Format);
m_dynamic = (desc.Usage == D3D11_USAGE_DYNAMIC);
}
D3D11::Texture::~Texture()
{
Destroy();
}
DXGI_FORMAT D3D11::Texture::GetDXGIFormat(Format format)
{
return s_dxgi_mapping[static_cast<u8>(format)];
}
GPUTexture::Format D3D11::Texture::LookupBaseFormat(DXGI_FORMAT dformat)
{
for (u32 i = 0; i < static_cast<u32>(s_dxgi_mapping.size()); i++)
{
if (s_dxgi_mapping[i] == dformat)
return static_cast<Format>(i);
}
return GPUTexture::Format::Unknown;
}
D3D11_TEXTURE2D_DESC D3D11::Texture::GetDesc() const
{
D3D11_TEXTURE2D_DESC desc;
m_texture->GetDesc(&desc);
return desc;
}
bool D3D11::Texture::IsValid() const
{
return static_cast<bool>(m_texture);
}
bool D3D11::Texture::Create(ID3D11Device* device, u32 width, u32 height, u32 layers, u32 levels, u32 samples,
Format format, u32 bind_flags, const void* initial_data /* = nullptr */,
u32 initial_data_stride /* = 0 */, bool dynamic /* = false */)
{
if (width > D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION || height > D3D11_REQ_TEXTURE2D_U_OR_V_DIMENSION ||
layers > D3D11_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION || (layers > 1 && samples > 1))
{
Log_ErrorPrintf("Texture bounds (%ux%ux%u, %u mips, %u samples) are too large", width, height, layers, levels,
samples);
return false;
}
CD3D11_TEXTURE2D_DESC desc(GetDXGIFormat(format), width, height, layers, levels, bind_flags,
dynamic ? D3D11_USAGE_DYNAMIC : D3D11_USAGE_DEFAULT, dynamic ? D3D11_CPU_ACCESS_WRITE : 0,
samples, 0, 0);
D3D11_SUBRESOURCE_DATA srd;
srd.pSysMem = initial_data;
srd.SysMemPitch = initial_data_stride;
srd.SysMemSlicePitch = initial_data_stride * height;
ComPtr<ID3D11Texture2D> texture;
const HRESULT tex_hr = device->CreateTexture2D(&desc, initial_data ? &srd : nullptr, texture.GetAddressOf());
if (FAILED(tex_hr))
{
Log_ErrorPrintf(
"Create texture failed: 0x%08X (%ux%u levels:%u samples:%u format:%u bind_flags:%X initial_data:%p)", tex_hr,
width, height, levels, samples, static_cast<unsigned>(format), bind_flags, initial_data);
return false;
}
ComPtr<ID3D11ShaderResourceView> srv;
if (bind_flags & D3D11_BIND_SHADER_RESOURCE)
{
const D3D11_SRV_DIMENSION srv_dimension =
(desc.SampleDesc.Count > 1) ?
D3D11_SRV_DIMENSION_TEXTURE2DMS :
(desc.ArraySize > 1 ? D3D11_SRV_DIMENSION_TEXTURE2DARRAY : D3D11_SRV_DIMENSION_TEXTURE2D);
const CD3D11_SHADER_RESOURCE_VIEW_DESC srv_desc(srv_dimension, desc.Format, 0, desc.MipLevels, 0, desc.ArraySize);
const HRESULT hr = device->CreateShaderResourceView(texture.Get(), &srv_desc, srv.GetAddressOf());
if (FAILED(hr))
{
Log_ErrorPrintf("Create SRV for texture failed: 0x%08X", hr);
return false;
}
}
ComPtr<ID3D11RenderTargetView> rtv;
if (bind_flags & D3D11_BIND_RENDER_TARGET)
{
const D3D11_RTV_DIMENSION rtv_dimension =
(desc.SampleDesc.Count > 1) ? D3D11_RTV_DIMENSION_TEXTURE2DMS : D3D11_RTV_DIMENSION_TEXTURE2D;
const CD3D11_RENDER_TARGET_VIEW_DESC rtv_desc(rtv_dimension, desc.Format, 0, 0, desc.ArraySize);
const HRESULT hr = device->CreateRenderTargetView(texture.Get(), &rtv_desc, rtv.GetAddressOf());
if (FAILED(hr))
{
Log_ErrorPrintf("Create RTV for texture failed: 0x%08X", hr);
return false;
}
}
m_texture = std::move(texture);
m_srv = std::move(srv);
m_rtv = std::move(rtv);
m_width = static_cast<u16>(width);
m_height = static_cast<u16>(height);
m_layers = static_cast<u8>(layers);
m_levels = static_cast<u8>(levels);
m_samples = static_cast<u8>(samples);
m_format = format;
m_dynamic = dynamic;
return true;
}
bool D3D11::Texture::Adopt(ID3D11Device* device, ComPtr<ID3D11Texture2D> texture)
{
D3D11_TEXTURE2D_DESC desc;
texture->GetDesc(&desc);
ComPtr<ID3D11ShaderResourceView> srv;
if (desc.BindFlags & D3D11_BIND_SHADER_RESOURCE)
{
const D3D11_SRV_DIMENSION srv_dimension =
(desc.SampleDesc.Count > 1) ? D3D11_SRV_DIMENSION_TEXTURE2DMS : D3D11_SRV_DIMENSION_TEXTURE2D;
const CD3D11_SHADER_RESOURCE_VIEW_DESC srv_desc(srv_dimension, desc.Format, 0, desc.MipLevels, 0, desc.ArraySize);
const HRESULT hr = device->CreateShaderResourceView(texture.Get(), &srv_desc, srv.ReleaseAndGetAddressOf());
if (FAILED(hr))
{
Log_ErrorPrintf("Create SRV for adopted texture failed: 0x%08X", hr);
return false;
}
}
ComPtr<ID3D11RenderTargetView> rtv;
if (desc.BindFlags & D3D11_BIND_RENDER_TARGET)
{
const D3D11_RTV_DIMENSION rtv_dimension =
(desc.SampleDesc.Count > 1) ? D3D11_RTV_DIMENSION_TEXTURE2DMS : D3D11_RTV_DIMENSION_TEXTURE2D;
const CD3D11_RENDER_TARGET_VIEW_DESC rtv_desc(rtv_dimension, desc.Format, 0, 0, desc.ArraySize);
const HRESULT hr = device->CreateRenderTargetView(texture.Get(), &rtv_desc, rtv.ReleaseAndGetAddressOf());
if (FAILED(hr))
{
Log_ErrorPrintf("Create RTV for adopted texture failed: 0x%08X", hr);
return false;
}
}
m_texture = std::move(texture);
m_srv = std::move(srv);
m_rtv = std::move(rtv);
m_width = static_cast<u16>(desc.Width);
m_height = static_cast<u16>(desc.Height);
m_layers = static_cast<u8>(desc.ArraySize);
m_levels = static_cast<u8>(desc.MipLevels);
m_samples = static_cast<u8>(desc.SampleDesc.Count);
m_dynamic = (desc.Usage == D3D11_USAGE_DYNAMIC);
return true;
}
void D3D11::Texture::Destroy()
{
m_rtv.Reset();
m_srv.Reset();
m_texture.Reset();
m_dynamic = false;
ClearBaseProperties();
}
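// A minimal usage sketch for this wrapper, assuming a valid ID3D11Device* (device), its immediate
// context (context), and the GPUTexture::Format::RGBA8 enumerator from gpu_texture.h.
D3D11::Texture tex;
if (!tex.Create(device, 1024, 512, /*layers=*/1, /*levels=*/1, /*samples=*/1, GPUTexture::Format::RGBA8,
D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET))
return false;
context->OMSetRenderTargets(1, tex.GetD3DRTVArray(), nullptr); // render into it
context->PSSetShaderResources(0, 1, tex.GetD3DSRVArray()); // or sample from it in a later pass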

View File

@@ -1,55 +0,0 @@

// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../gpu_texture.h"
#include "../windows_headers.h"
#include <d3d11.h>
#include <wrl/client.h>
namespace D3D11 {
class Texture final : public GPUTexture
{
public:
template<typename T>
using ComPtr = Microsoft::WRL::ComPtr<T>;
Texture();
Texture(ComPtr<ID3D11Texture2D> texture, ComPtr<ID3D11ShaderResourceView> srv, ComPtr<ID3D11RenderTargetView> rtv);
~Texture();
static DXGI_FORMAT GetDXGIFormat(Format format);
static Format LookupBaseFormat(DXGI_FORMAT dformat);
ALWAYS_INLINE ID3D11Texture2D* GetD3DTexture() const { return m_texture.Get(); }
ALWAYS_INLINE ID3D11ShaderResourceView* GetD3DSRV() const { return m_srv.Get(); }
ALWAYS_INLINE ID3D11RenderTargetView* GetD3DRTV() const { return m_rtv.Get(); }
ALWAYS_INLINE ID3D11ShaderResourceView* const* GetD3DSRVArray() const { return m_srv.GetAddressOf(); }
ALWAYS_INLINE ID3D11RenderTargetView* const* GetD3DRTVArray() const { return m_rtv.GetAddressOf(); }
ALWAYS_INLINE DXGI_FORMAT GetDXGIFormat() const { return GetDXGIFormat(m_format); }
ALWAYS_INLINE bool IsDynamic() const { return m_dynamic; }
ALWAYS_INLINE operator ID3D11Texture2D*() const { return m_texture.Get(); }
ALWAYS_INLINE operator ID3D11ShaderResourceView*() const { return m_srv.Get(); }
ALWAYS_INLINE operator ID3D11RenderTargetView*() const { return m_rtv.Get(); }
ALWAYS_INLINE operator bool() const { return static_cast<bool>(m_texture); }
D3D11_TEXTURE2D_DESC GetDesc() const;
bool IsValid() const override;
bool Create(ID3D11Device* device, u32 width, u32 height, u32 layers, u32 levels, u32 samples, Format format,
u32 bind_flags, const void* initial_data = nullptr, u32 initial_data_stride = 0, bool dynamic = false);
bool Adopt(ID3D11Device* device, ComPtr<ID3D11Texture2D> texture);
void Destroy();
private:
ComPtr<ID3D11Texture2D> m_texture;
ComPtr<ID3D11ShaderResourceView> m_srv;
ComPtr<ID3D11RenderTargetView> m_rtv;
bool m_dynamic = false;
};
} // namespace D3D11
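// Sketch of the other construction path: adopting an already-created texture (typically the swap
// chain backbuffer) so the wrapper only builds the views; "swap_chain" is a placeholder
// IDXGISwapChain* here.
Microsoft::WRL::ComPtr<ID3D11Texture2D> backbuffer;
if (FAILED(swap_chain->GetBuffer(0, IID_PPV_ARGS(backbuffer.GetAddressOf()))))
return false;
D3D11::Texture swap_chain_texture;
if (!swap_chain_texture.Adopt(device, std::move(backbuffer)))
return false;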

View File

@@ -1,556 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
// Parts originally from Dolphin Emulator, also written by myself.
#include "context.h"
#include "../assert.h"
#include "../log.h"
#include "../scoped_guard.h"
#include <algorithm>
#include <array>
#include <dxgi1_2.h>
#include <queue>
#include <vector>
Log_SetChannel(D3D12::Context);
std::unique_ptr<D3D12::Context> g_d3d12_context;
namespace D3D12 {
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
// Private D3D12 state
static HMODULE s_d3d12_library;
static PFN_D3D12_CREATE_DEVICE s_d3d12_create_device;
static PFN_D3D12_GET_DEBUG_INTERFACE s_d3d12_get_debug_interface;
static PFN_D3D12_SERIALIZE_ROOT_SIGNATURE s_d3d12_serialize_root_signature;
static bool LoadD3D12Library()
{
if ((s_d3d12_library = LoadLibrary("d3d12.dll")) == nullptr ||
(s_d3d12_create_device =
reinterpret_cast<PFN_D3D12_CREATE_DEVICE>(GetProcAddress(s_d3d12_library, "D3D12CreateDevice"))) == nullptr ||
(s_d3d12_get_debug_interface = reinterpret_cast<PFN_D3D12_GET_DEBUG_INTERFACE>(
GetProcAddress(s_d3d12_library, "D3D12GetDebugInterface"))) == nullptr ||
(s_d3d12_serialize_root_signature = reinterpret_cast<PFN_D3D12_SERIALIZE_ROOT_SIGNATURE>(
GetProcAddress(s_d3d12_library, "D3D12SerializeRootSignature"))) == nullptr)
{
Log_ErrorPrintf("d3d12.dll could not be loaded.");
s_d3d12_create_device = nullptr;
s_d3d12_get_debug_interface = nullptr;
s_d3d12_serialize_root_signature = nullptr;
if (s_d3d12_library)
FreeLibrary(s_d3d12_library);
s_d3d12_library = nullptr;
return false;
}
return true;
}
static void UnloadD3D12Library()
{
s_d3d12_serialize_root_signature = nullptr;
s_d3d12_get_debug_interface = nullptr;
s_d3d12_create_device = nullptr;
if (s_d3d12_library)
{
FreeLibrary(s_d3d12_library);
s_d3d12_library = nullptr;
}
}
#else
static const PFN_D3D12_CREATE_DEVICE s_d3d12_create_device = D3D12CreateDevice;
static const PFN_D3D12_GET_DEBUG_INTERFACE s_d3d12_get_debug_interface = D3D12GetDebugInterface;
static const PFN_D3D12_SERIALIZE_ROOT_SIGNATURE s_d3d12_serialize_root_signature = D3D12SerializeRootSignature;
static bool LoadD3D12Library()
{
return true;
}
static void UnloadD3D12Library() {}
#endif
Context::Context() = default;
Context::~Context()
{
DestroyResources();
}
Context::ComPtr<ID3DBlob> Context::SerializeRootSignature(const D3D12_ROOT_SIGNATURE_DESC* desc)
{
ComPtr<ID3DBlob> blob;
ComPtr<ID3DBlob> error_blob;
const HRESULT hr = s_d3d12_serialize_root_signature(desc, D3D_ROOT_SIGNATURE_VERSION_1, blob.GetAddressOf(),
error_blob.GetAddressOf());
if (FAILED(hr))
{
Log_ErrorPrintf("D3D12SerializeRootSignature() failed: %08X", hr);
if (error_blob)
Log_ErrorPrintf("%s", error_blob->GetBufferPointer());
return {};
}
return blob;
}
D3D12::Context::ComPtr<ID3D12RootSignature> Context::CreateRootSignature(const D3D12_ROOT_SIGNATURE_DESC* desc)
{
ComPtr<ID3DBlob> blob = SerializeRootSignature(desc);
if (!blob)
return {};
ComPtr<ID3D12RootSignature> rs;
const HRESULT hr =
m_device->CreateRootSignature(0, blob->GetBufferPointer(), blob->GetBufferSize(), IID_PPV_ARGS(rs.GetAddressOf()));
if (FAILED(hr))
{
Log_ErrorPrintf("CreateRootSignature() failed: %08X", hr);
return {};
}
return rs;
}
bool Context::SupportsTextureFormat(DXGI_FORMAT format)
{
constexpr u32 required = D3D12_FORMAT_SUPPORT1_TEXTURE2D | D3D12_FORMAT_SUPPORT1_SHADER_SAMPLE;
D3D12_FEATURE_DATA_FORMAT_SUPPORT support = {format};
return SUCCEEDED(m_device->CheckFeatureSupport(D3D12_FEATURE_FORMAT_SUPPORT, &support, sizeof(support))) &&
(support.Support1 & required) == required;
}
bool Context::Create(IDXGIFactory* dxgi_factory, u32 adapter_index, bool enable_debug_layer)
{
Assert(!g_d3d12_context);
if (!LoadD3D12Library())
return false;
g_d3d12_context.reset(new Context());
if (!g_d3d12_context->CreateDevice(dxgi_factory, adapter_index, enable_debug_layer) ||
!g_d3d12_context->CreateCommandQueue() || !g_d3d12_context->CreateFence() ||
!g_d3d12_context->CreateDescriptorHeaps() || !g_d3d12_context->CreateCommandLists() ||
!g_d3d12_context->CreateTimestampQuery() || !g_d3d12_context->CreateTextureStreamBuffer())
{
Destroy();
return false;
}
return true;
}
void Context::Destroy()
{
if (g_d3d12_context)
g_d3d12_context.reset();
UnloadD3D12Library();
}
bool Context::CreateDevice(IDXGIFactory* dxgi_factory, u32 adapter_index, bool enable_debug_layer)
{
ComPtr<IDXGIAdapter> adapter;
HRESULT hr = dxgi_factory->EnumAdapters(adapter_index, &adapter);
if (FAILED(hr))
{
Log_ErrorPrintf("Adapter %u not found, using default", adapter_index);
adapter = nullptr;
}
else
{
DXGI_ADAPTER_DESC adapter_desc;
if (SUCCEEDED(adapter->GetDesc(&adapter_desc)))
{
char adapter_name_buffer[128];
const int name_length = WideCharToMultiByte(CP_UTF8, 0, adapter_desc.Description,
static_cast<int>(std::wcslen(adapter_desc.Description)),
adapter_name_buffer, countof(adapter_name_buffer), 0, nullptr);
if (name_length >= 0)
{
adapter_name_buffer[name_length] = 0;
Log_InfoPrintf("D3D Adapter: %s", adapter_name_buffer);
}
}
}
// Enabling the debug layer will fail if the Graphics Tools feature is not installed.
if (enable_debug_layer)
{
hr = s_d3d12_get_debug_interface(IID_PPV_ARGS(&m_debug_interface));
if (SUCCEEDED(hr))
{
m_debug_interface->EnableDebugLayer();
}
else
{
Log_ErrorPrintf("Debug layer requested but not available.");
enable_debug_layer = false;
}
}
// Create the actual device.
hr = s_d3d12_create_device(adapter.Get(), D3D_FEATURE_LEVEL_11_0, IID_PPV_ARGS(&m_device));
AssertMsg(SUCCEEDED(hr), "Create D3D12 device");
if (FAILED(hr))
return false;
if (enable_debug_layer)
{
ComPtr<ID3D12InfoQueue> info_queue;
if (SUCCEEDED(m_device.As(&info_queue)))
{
info_queue->SetBreakOnSeverity(D3D12_MESSAGE_SEVERITY_ERROR, TRUE);
info_queue->SetBreakOnSeverity(D3D12_MESSAGE_SEVERITY_WARNING, TRUE);
D3D12_INFO_QUEUE_FILTER filter = {};
std::array<D3D12_MESSAGE_ID, 5> id_list{
D3D12_MESSAGE_ID_CLEARRENDERTARGETVIEW_MISMATCHINGCLEARVALUE,
D3D12_MESSAGE_ID_CLEARDEPTHSTENCILVIEW_MISMATCHINGCLEARVALUE,
D3D12_MESSAGE_ID_CREATEGRAPHICSPIPELINESTATE_RENDERTARGETVIEW_NOT_SET,
D3D12_MESSAGE_ID_CREATEINPUTLAYOUT_TYPE_MISMATCH,
D3D12_MESSAGE_ID_DRAW_EMPTY_SCISSOR_RECTANGLE,
};
filter.DenyList.NumIDs = static_cast<UINT>(id_list.size());
filter.DenyList.pIDList = id_list.data();
info_queue->PushStorageFilter(&filter);
}
}
return true;
}
bool Context::CreateCommandQueue()
{
const D3D12_COMMAND_QUEUE_DESC queue_desc = {D3D12_COMMAND_LIST_TYPE_DIRECT, D3D12_COMMAND_QUEUE_PRIORITY_NORMAL,
D3D12_COMMAND_QUEUE_FLAG_NONE};
HRESULT hr = m_device->CreateCommandQueue(&queue_desc, IID_PPV_ARGS(&m_command_queue));
AssertMsg(SUCCEEDED(hr), "Create command queue");
return SUCCEEDED(hr);
}
bool Context::CreateFence()
{
HRESULT hr = m_device->CreateFence(m_completed_fence_value, D3D12_FENCE_FLAG_NONE, IID_PPV_ARGS(&m_fence));
AssertMsg(SUCCEEDED(hr), "Create fence");
if (FAILED(hr))
return false;
m_fence_event = CreateEvent(nullptr, FALSE, FALSE, nullptr);
AssertMsg(m_fence_event != NULL, "Create fence event");
if (!m_fence_event)
return false;
return true;
}
bool Context::CreateDescriptorHeaps()
{
static constexpr size_t MAX_SRVS = 16384;
static constexpr size_t MAX_RTVS = 8192;
static constexpr size_t MAX_DSVS = 128;
static constexpr size_t MAX_SAMPLERS = 128;
if (!m_descriptor_heap_manager.Create(m_device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV, MAX_SRVS, true) ||
!m_rtv_heap_manager.Create(m_device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_RTV, MAX_RTVS, false) ||
!m_dsv_heap_manager.Create(m_device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_DSV, MAX_DSVS, false) ||
!m_sampler_heap_manager.Create(m_device.Get(), D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER, MAX_SAMPLERS, true))
{
return false;
}
m_gpu_descriptor_heaps[0] = m_descriptor_heap_manager.GetDescriptorHeap();
m_gpu_descriptor_heaps[1] = m_sampler_heap_manager.GetDescriptorHeap();
// Allocate null SRV descriptor for unbound textures.
constexpr D3D12_SHADER_RESOURCE_VIEW_DESC null_srv_desc = {DXGI_FORMAT_R8G8B8A8_UNORM, D3D12_SRV_DIMENSION_TEXTURE2D,
D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING};
if (!m_descriptor_heap_manager.Allocate(&m_null_srv_descriptor))
{
Panic("Failed to allocate null descriptor");
return false;
}
m_device->CreateShaderResourceView(nullptr, &null_srv_desc, m_null_srv_descriptor.cpu_handle);
return true;
}
bool Context::CreateCommandLists()
{
for (u32 i = 0; i < NUM_COMMAND_LISTS; i++)
{
CommandListResources& res = m_command_lists[i];
HRESULT hr = m_device->CreateCommandAllocator(D3D12_COMMAND_LIST_TYPE_DIRECT,
IID_PPV_ARGS(res.command_allocator.GetAddressOf()));
AssertMsg(SUCCEEDED(hr), "Create command allocator");
if (FAILED(hr))
return false;
hr = m_device->CreateCommandList(1, D3D12_COMMAND_LIST_TYPE_DIRECT, res.command_allocator.Get(), nullptr,
IID_PPV_ARGS(res.command_list.GetAddressOf()));
if (FAILED(hr))
{
Log_ErrorPrintf("Failed to create command list: %08X", hr);
return false;
}
// Close the command list, since the first thing we do when reusing it is reset it.
hr = res.command_list->Close();
AssertMsg(SUCCEEDED(hr), "Closing new command list failed");
if (FAILED(hr))
return false;
}
MoveToNextCommandList();
return true;
}
bool Context::CreateTextureStreamBuffer()
{
return m_texture_stream_buffer.Create(TEXTURE_UPLOAD_BUFFER_SIZE);
}
void Context::MoveToNextCommandList()
{
m_current_command_list = (m_current_command_list + 1) % NUM_COMMAND_LISTS;
m_current_fence_value++;
// We may have to wait if this command list hasn't finished on the GPU.
CommandListResources& res = m_command_lists[m_current_command_list];
WaitForFence(res.ready_fence_value);
res.ready_fence_value = m_current_fence_value;
// Begin command list.
res.command_allocator->Reset();
res.command_list->Reset(res.command_allocator.Get(), nullptr);
if (res.has_timestamp_query)
{
// Read back the timestamps from the last time this command list was used. We don't need to worry
// about disjoint queries in D3D12; the timestamp frequency is reliable within a single command list.
const u32 offset = (m_current_command_list * (sizeof(u64) * NUM_TIMESTAMP_QUERIES_PER_CMDLIST));
const D3D12_RANGE read_range = {offset, offset + (sizeof(u64) * NUM_TIMESTAMP_QUERIES_PER_CMDLIST)};
void* map;
HRESULT hr = m_timestamp_query_buffer->Map(0, &read_range, &map);
if (SUCCEEDED(hr))
{
u64 timestamps[2];
std::memcpy(timestamps, static_cast<const u8*>(map) + offset, sizeof(timestamps));
m_accumulated_gpu_time +=
static_cast<float>(static_cast<double>(timestamps[1] - timestamps[0]) / m_timestamp_frequency);
const D3D12_RANGE write_range = {};
m_timestamp_query_buffer->Unmap(0, &write_range);
}
else
{
Log_WarningPrintf("Map() for timestamp query failed: %08X", hr);
}
}
res.has_timestamp_query = m_gpu_timing_enabled;
if (m_gpu_timing_enabled)
{
res.command_list->EndQuery(m_timestamp_query_heap.Get(), D3D12_QUERY_TYPE_TIMESTAMP,
m_current_command_list * NUM_TIMESTAMP_QUERIES_PER_CMDLIST);
}
res.command_list->SetDescriptorHeaps(static_cast<UINT>(m_gpu_descriptor_heaps.size()), m_gpu_descriptor_heaps.data());
}
void Context::ExecuteCommandList(bool wait_for_completion)
{
CommandListResources& res = m_command_lists[m_current_command_list];
HRESULT hr;
if (res.has_timestamp_query)
{
// Write the closing timestamp at the end of the command list and resolve both queries into the readback buffer.
res.command_list->EndQuery(m_timestamp_query_heap.Get(), D3D12_QUERY_TYPE_TIMESTAMP,
(m_current_command_list * NUM_TIMESTAMP_QUERIES_PER_CMDLIST) + 1);
res.command_list->ResolveQueryData(m_timestamp_query_heap.Get(), D3D12_QUERY_TYPE_TIMESTAMP,
m_current_command_list * NUM_TIMESTAMP_QUERIES_PER_CMDLIST,
NUM_TIMESTAMP_QUERIES_PER_CMDLIST, m_timestamp_query_buffer.Get(),
m_current_command_list * (sizeof(u64) * NUM_TIMESTAMP_QUERIES_PER_CMDLIST));
}
// Close and queue command list.
hr = res.command_list->Close();
AssertMsg(SUCCEEDED(hr), "Close command list");
const std::array<ID3D12CommandList*, 1> execute_lists{res.command_list.Get()};
m_command_queue->ExecuteCommandLists(static_cast<UINT>(execute_lists.size()), execute_lists.data());
// Update fence when GPU has completed.
hr = m_command_queue->Signal(m_fence.Get(), m_current_fence_value);
AssertMsg(SUCCEEDED(hr), "Signal fence");
MoveToNextCommandList();
if (wait_for_completion)
WaitForFence(res.ready_fence_value);
}
void Context::DeferResourceDestruction(ID3D12Resource* resource)
{
if (!resource)
return;
resource->AddRef();
m_command_lists[m_current_command_list].pending_resources.push_back(resource);
}
void Context::DeferDescriptorDestruction(DescriptorHeapManager& manager, u32 index)
{
m_command_lists[m_current_command_list].pending_descriptors.emplace_back(manager, index);
}
void Context::DeferDescriptorDestruction(DescriptorHeapManager& manager, DescriptorHandle* handle)
{
if (handle->index == DescriptorHandle::INVALID_INDEX)
return;
m_command_lists[m_current_command_list].pending_descriptors.emplace_back(manager, handle->index);
handle->Clear();
}
void Context::DestroyPendingResources(CommandListResources& cmdlist)
{
for (const auto& dd : cmdlist.pending_descriptors)
dd.first.Free(dd.second);
cmdlist.pending_descriptors.clear();
for (ID3D12Resource* res : cmdlist.pending_resources)
res->Release();
cmdlist.pending_resources.clear();
}
void Context::DestroyResources()
{
ExecuteCommandList(true);
m_timestamp_query_buffer.Reset();
m_timestamp_query_heap.Reset();
m_texture_stream_buffer.Destroy(false);
m_descriptor_heap_manager.Free(&m_null_srv_descriptor);
m_sampler_heap_manager.Destroy();
m_dsv_heap_manager.Destroy();
m_rtv_heap_manager.Destroy();
m_descriptor_heap_manager.Destroy();
m_command_lists = {};
m_current_command_list = 0;
m_completed_fence_value = 0;
m_current_fence_value = 0;
if (m_fence_event)
{
CloseHandle(m_fence_event);
m_fence_event = {};
}
m_command_queue.Reset();
m_debug_interface.Reset();
m_device.Reset();
}
void Context::WaitForFence(u64 fence)
{
if (m_completed_fence_value >= fence)
return;
// Try non-blocking check.
m_completed_fence_value = m_fence->GetCompletedValue();
if (m_completed_fence_value < fence)
{
// Fall back to event.
HRESULT hr = m_fence->SetEventOnCompletion(fence, m_fence_event);
AssertMsg(SUCCEEDED(hr), "Set fence event on completion");
WaitForSingleObject(m_fence_event, INFINITE);
m_completed_fence_value = m_fence->GetCompletedValue();
}
// Release resources for any command lists that have now completed.
u32 index = (m_current_command_list + 1) % NUM_COMMAND_LISTS;
for (u32 i = 0; i < NUM_COMMAND_LISTS; i++)
{
CommandListResources& res = m_command_lists[index];
if (m_completed_fence_value < res.ready_fence_value)
break;
DestroyPendingResources(res);
index = (index + 1) % NUM_COMMAND_LISTS;
}
}
void Context::WaitForGPUIdle()
{
u32 index = (m_current_command_list + 1) % NUM_COMMAND_LISTS;
for (u32 i = 0; i < (NUM_COMMAND_LISTS - 1); i++)
{
WaitForFence(m_command_lists[index].ready_fence_value);
index = (index + 1) % NUM_COMMAND_LISTS;
}
}
bool Context::CreateTimestampQuery()
{
constexpr u32 QUERY_COUNT = NUM_TIMESTAMP_QUERIES_PER_CMDLIST * NUM_COMMAND_LISTS;
constexpr u32 BUFFER_SIZE = sizeof(u64) * QUERY_COUNT;
const D3D12_QUERY_HEAP_DESC desc = {D3D12_QUERY_HEAP_TYPE_TIMESTAMP, QUERY_COUNT};
HRESULT hr = m_device->CreateQueryHeap(&desc, IID_PPV_ARGS(m_timestamp_query_heap.ReleaseAndGetAddressOf()));
if (FAILED(hr))
{
Log_ErrorPrintf("CreateQueryHeap() for timestamp failed with %08X", hr);
return false;
}
const D3D12_HEAP_PROPERTIES heap_properties = {D3D12_HEAP_TYPE_READBACK};
const D3D12_RESOURCE_DESC resource_desc = {D3D12_RESOURCE_DIMENSION_BUFFER,
0,
BUFFER_SIZE,
1,
1,
1,
DXGI_FORMAT_UNKNOWN,
{1, 0},
D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
D3D12_RESOURCE_FLAG_NONE};
hr = m_device->CreateCommittedResource(&heap_properties, D3D12_HEAP_FLAG_NONE, &resource_desc,
D3D12_RESOURCE_STATE_COPY_DEST, nullptr,
IID_PPV_ARGS(m_timestamp_query_buffer.ReleaseAndGetAddressOf()));
if (FAILED(hr))
{
Log_ErrorPrintf("CreateResource() for timestamp failed with %08X", hr);
return false;
}
u64 frequency;
hr = m_command_queue->GetTimestampFrequency(&frequency);
if (FAILED(hr))
{
Log_ErrorPrintf("GetTimestampFrequency() failed: %08X", hr);
return false;
}
m_timestamp_frequency = static_cast<double>(frequency) / 1000.0;
return true;
}
float Context::GetAndResetAccumulatedGPUTime()
{
const float time = m_accumulated_gpu_time;
m_accumulated_gpu_time = 0.0f;
return time;
}
void Context::SetEnableGPUTiming(bool enabled)
{
m_gpu_timing_enabled = enabled;
}
} // namespace D3D12
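// Sketch of the lifecycle this context exposed, assuming a valid IDXGIFactory* (dxgi_factory)
// obtained elsewhere; error handling is trimmed.
if (!D3D12::Context::Create(dxgi_factory, /*adapter_index=*/0, /*enable_debug_layer=*/false))
return false;
ID3D12GraphicsCommandList* cmdlist = g_d3d12_context->GetCommandList();
// ... record uploads/draws into cmdlist ...
g_d3d12_context->ExecuteCommandList(/*wait_for_completion=*/false); // submit and rotate to the next list
g_d3d12_context->WaitForGPUIdle(); // drain in-flight work before shutdown
D3D12::Context::Destroy();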

View File

@@ -1,156 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
// Parts originally from Dolphin Emulator, also written by myself.
#pragma once
#include "../types.h"
#include "../windows_headers.h"
#include "descriptor_heap_manager.h"
#include "stream_buffer.h"
#include <array>
#include <d3d12.h>
#include <memory>
#include <vector>
#include <wrl/client.h>
struct IDXGIFactory;
namespace D3D12 {
class Context
{
public:
template<typename T>
using ComPtr = Microsoft::WRL::ComPtr<T>;
enum : u32
{
// Number of command lists. One is being built while the other(s) are executed.
NUM_COMMAND_LISTS = 3,
// Textures that don't fit into this buffer will be uploaded with a staging buffer.
TEXTURE_UPLOAD_BUFFER_SIZE = 16 * 1024 * 1024,
/// Start/End timestamp queries.
NUM_TIMESTAMP_QUERIES_PER_CMDLIST = 2,
};
~Context();
// Creates new device and context.
static bool Create(IDXGIFactory* dxgi_factory, u32 adapter_index, bool enable_debug_layer);
// Destroys active context.
static void Destroy();
ID3D12Device* GetDevice() const { return m_device.Get(); }
ID3D12CommandQueue* GetCommandQueue() const { return m_command_queue.Get(); }
// Returns the current command list, commands can be recorded directly.
ID3D12GraphicsCommandList* GetCommandList() const
{
return m_command_lists[m_current_command_list].command_list.Get();
}
// Descriptor manager access.
DescriptorHeapManager& GetDescriptorHeapManager() { return m_descriptor_heap_manager; }
DescriptorHeapManager& GetRTVHeapManager() { return m_rtv_heap_manager; }
DescriptorHeapManager& GetDSVHeapManager() { return m_dsv_heap_manager; }
DescriptorHeapManager& GetSamplerHeapManager() { return m_sampler_heap_manager; }
ID3D12DescriptorHeap* const* GetGPUDescriptorHeaps() const { return m_gpu_descriptor_heaps.data(); }
u32 GetGPUDescriptorHeapCount() const { return static_cast<u32>(m_gpu_descriptor_heaps.size()); }
const DescriptorHandle& GetNullSRVDescriptor() const { return m_null_srv_descriptor; }
StreamBuffer& GetTextureStreamBuffer() { return m_texture_stream_buffer; }
// Root signature access.
ComPtr<ID3DBlob> SerializeRootSignature(const D3D12_ROOT_SIGNATURE_DESC* desc);
ComPtr<ID3D12RootSignature> CreateRootSignature(const D3D12_ROOT_SIGNATURE_DESC* desc);
// Fence value for current command list.
u64 GetCurrentFenceValue() const { return m_current_fence_value; }
// Last "completed" fence.
u64 GetCompletedFenceValue() const { return m_completed_fence_value; }
// Feature level to use when compiling shaders.
D3D_FEATURE_LEVEL GetFeatureLevel() const { return m_feature_level; }
// Test for support for the specified texture format.
bool SupportsTextureFormat(DXGI_FORMAT format);
// Executes the current command list.
void ExecuteCommandList(bool wait_for_completion);
// Waits for a specific fence.
void WaitForFence(u64 fence);
// Waits for any in-flight command buffers to complete.
void WaitForGPUIdle();
// Defers destruction of a D3D resource (associates it with the current list).
void DeferResourceDestruction(ID3D12Resource* resource);
// Defers destruction of a descriptor handle (associates it with the current list).
void DeferDescriptorDestruction(DescriptorHeapManager& manager, u32 index);
void DeferDescriptorDestruction(DescriptorHeapManager& manager, DescriptorHandle* handle);
float GetAndResetAccumulatedGPUTime();
void SetEnableGPUTiming(bool enabled);
private:
struct CommandListResources
{
ComPtr<ID3D12CommandAllocator> command_allocator;
ComPtr<ID3D12GraphicsCommandList> command_list;
std::vector<ID3D12Resource*> pending_resources;
std::vector<std::pair<DescriptorHeapManager&, u32>> pending_descriptors;
u64 ready_fence_value = 0;
bool has_timestamp_query = false;
};
Context();
bool CreateDevice(IDXGIFactory* dxgi_factory, u32 adapter_index, bool enable_debug_layer);
bool CreateCommandQueue();
bool CreateFence();
bool CreateDescriptorHeaps();
bool CreateCommandLists();
bool CreateTextureStreamBuffer();
bool CreateTimestampQuery();
void MoveToNextCommandList();
void DestroyPendingResources(CommandListResources& cmdlist);
void DestroyResources();
ComPtr<ID3D12Debug> m_debug_interface;
ComPtr<ID3D12Device> m_device;
ComPtr<ID3D12CommandQueue> m_command_queue;
ComPtr<ID3D12Fence> m_fence = nullptr;
HANDLE m_fence_event = {};
u64 m_current_fence_value = 0;
u64 m_completed_fence_value = 0;
std::array<CommandListResources, NUM_COMMAND_LISTS> m_command_lists;
u32 m_current_command_list = NUM_COMMAND_LISTS - 1;
ComPtr<ID3D12QueryHeap> m_timestamp_query_heap;
ComPtr<ID3D12Resource> m_timestamp_query_buffer;
double m_timestamp_frequency = 0.0;
float m_accumulated_gpu_time = 0.0f;
bool m_gpu_timing_enabled = false;
DescriptorHeapManager m_descriptor_heap_manager;
DescriptorHeapManager m_rtv_heap_manager;
DescriptorHeapManager m_dsv_heap_manager;
DescriptorHeapManager m_sampler_heap_manager;
std::array<ID3D12DescriptorHeap*, 2> m_gpu_descriptor_heaps = {};
DescriptorHandle m_null_srv_descriptor;
StreamBuffer m_texture_stream_buffer;
D3D_FEATURE_LEVEL m_feature_level = D3D_FEATURE_LEVEL_11_0;
};
} // namespace D3D12
extern std::unique_ptr<D3D12::Context> g_d3d12_context;
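// Sketch of the GPU-timing hooks: enable once, then poll the accumulated time each frame. The
// value is in milliseconds, since the timestamp frequency is stored as ticks per millisecond.
g_d3d12_context->SetEnableGPUTiming(true);
// ... per frame, after submitting work:
const float gpu_time_ms = g_d3d12_context->GetAndResetAccumulatedGPUTime();
Log_InfoPrintf("GPU busy time: %.2f ms", gpu_time_ms);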

View File

@@ -1,116 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
// Parts originally from Dolphin Emulator, also written by myself.
#include "descriptor_heap_manager.h"
#include "../assert.h"
#include "../log.h"
#include "context.h"
Log_SetChannel(DescriptorHeapManager);
namespace D3D12 {
DescriptorHeapManager::DescriptorHeapManager() = default;
DescriptorHeapManager::~DescriptorHeapManager() = default;
bool DescriptorHeapManager::Create(ID3D12Device* device, D3D12_DESCRIPTOR_HEAP_TYPE type, u32 num_descriptors,
bool shader_visible)
{
D3D12_DESCRIPTOR_HEAP_DESC desc = {type, static_cast<UINT>(num_descriptors),
shader_visible ? D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE :
D3D12_DESCRIPTOR_HEAP_FLAG_NONE};
HRESULT hr = device->CreateDescriptorHeap(&desc, IID_PPV_ARGS(&m_descriptor_heap));
AssertMsg(SUCCEEDED(hr), "Create descriptor heap");
if (FAILED(hr))
return false;
m_heap_base_cpu = m_descriptor_heap->GetCPUDescriptorHandleForHeapStart();
if (shader_visible)
m_heap_base_gpu = m_descriptor_heap->GetGPUDescriptorHandleForHeapStart();
m_num_descriptors = num_descriptors;
m_descriptor_increment_size = device->GetDescriptorHandleIncrementSize(type);
// Set all slots to unallocated (1)
const u32 bitset_count = num_descriptors / BITSET_SIZE + (((num_descriptors % BITSET_SIZE) != 0) ? 1 : 0);
m_free_slots.resize(bitset_count);
for (BitSetType& bs : m_free_slots)
bs.flip();
return true;
}
void DescriptorHeapManager::Destroy()
{
for (BitSetType& bs : m_free_slots)
Assert(bs.all());
m_num_descriptors = 0;
m_descriptor_increment_size = 0;
m_heap_base_cpu = {};
m_heap_base_gpu = {};
m_descriptor_heap.Reset();
m_free_slots.clear();
}
bool DescriptorHeapManager::Allocate(DescriptorHandle* handle, u32 count /* = 1 */)
{
// Linearly scan each bitset group for a run of `count` free slots.
for (u32 group = 0; group < m_free_slots.size(); group++)
{
BitSetType& bs = m_free_slots[group];
if (bs.none())
continue;
u32 bit = 0;
for (; bit < BITSET_SIZE; bit++)
{
// Don't let the run straddle the end of this group, and require every slot in it to be free.
if (bs[bit] && (bit + count) <= BITSET_SIZE)
{
u32 offset;
for (offset = 0; offset < count; offset++)
{
if (!bs[bit + offset])
break;
}
if (offset == count)
break;
}
}
// No contiguous run of `count` free slots in this group; try the next one.
if (bit == BITSET_SIZE)
continue;
const u32 index = group * BITSET_SIZE + bit;
for (u32 offset = 0; offset < count; offset++)
bs[bit + offset] = false;
handle->index = index;
handle->cpu_handle.ptr = m_heap_base_cpu.ptr + index * m_descriptor_increment_size;
handle->gpu_handle.ptr = m_heap_base_gpu.ptr + index * m_descriptor_increment_size;
return true;
}
Panic("Out of fixed descriptors");
return false;
}
void DescriptorHeapManager::Free(u32 index, u32 count /* = 1 */)
{
Assert(index < m_num_descriptors);
for (u32 i = 0; i < count; i++, index++)
{
u32 group = index / BITSET_SIZE;
u32 bit = index % BITSET_SIZE;
m_free_slots[group][bit] = true;
}
}
void DescriptorHeapManager::Free(DescriptorHandle* handle, u32 count /* = 1 */)
{
if (handle->index == DescriptorHandle::INVALID_INDEX)
return;
Free(handle->index, count);
handle->Clear();
}
} // namespace D3D12
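// Sketch of how a persistent SRV slot was allocated from the shader-visible heap, used to create a
// view, and later released through the deferred-destruction path; "resource" and "srv_desc" are
// placeholders for an existing ID3D12Resource* and its D3D12_SHADER_RESOURCE_VIEW_DESC.
D3D12::DescriptorHandle srv;
if (!g_d3d12_context->GetDescriptorHeapManager().Allocate(&srv))
return false;
g_d3d12_context->GetDevice()->CreateShaderResourceView(resource, &srv_desc, srv.cpu_handle);
// ... bind srv.gpu_handle via SetGraphicsRootDescriptorTable() while recording draws ...
g_d3d12_context->DeferDescriptorDestruction(g_d3d12_context->GetDescriptorHeapManager(), &srv);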

View File

@@ -1,84 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
// Parts originally from Dolphin Emulator, also written by myself.
#pragma once
#include "../types.h"
#include "../windows_headers.h"
#include <bitset>
#include <d3d12.h>
#include <map>
#include <vector>
#include <wrl/client.h>
namespace D3D12 {
// This class provides an abstraction for D3D12 descriptor heaps.
struct DescriptorHandle final
{
enum : u32
{
INVALID_INDEX = 0xFFFFFFFF
};
D3D12_CPU_DESCRIPTOR_HANDLE cpu_handle{};
D3D12_GPU_DESCRIPTOR_HANDLE gpu_handle{};
u32 index = INVALID_INDEX;
ALWAYS_INLINE operator bool() const { return index != INVALID_INDEX; }
ALWAYS_INLINE operator D3D12_CPU_DESCRIPTOR_HANDLE() const { return cpu_handle; }
ALWAYS_INLINE operator D3D12_GPU_DESCRIPTOR_HANDLE() const { return gpu_handle; }
ALWAYS_INLINE void Clear()
{
cpu_handle = {};
gpu_handle = {};
index = INVALID_INDEX;
}
};
class DescriptorHeapManager final
{
public:
DescriptorHeapManager();
~DescriptorHeapManager();
ALWAYS_INLINE ID3D12DescriptorHeap* GetDescriptorHeap() const { return m_descriptor_heap.Get(); }
ALWAYS_INLINE u32 GetDescriptorIncrementSize() const { return m_descriptor_increment_size; }
ALWAYS_INLINE D3D12_CPU_DESCRIPTOR_HANDLE OffsetCPUHandle(D3D12_CPU_DESCRIPTOR_HANDLE handle, u32 count) const
{
D3D12_CPU_DESCRIPTOR_HANDLE ret;
ret.ptr = handle.ptr + m_descriptor_increment_size * count;
return ret;
}
ALWAYS_INLINE D3D12_GPU_DESCRIPTOR_HANDLE OffsetGPUHandle(D3D12_GPU_DESCRIPTOR_HANDLE handle, u32 count) const
{
D3D12_GPU_DESCRIPTOR_HANDLE ret;
ret.ptr = handle.ptr + m_descriptor_increment_size * count;
return ret;
}
bool Create(ID3D12Device* device, D3D12_DESCRIPTOR_HEAP_TYPE type, u32 num_descriptors, bool shader_visible);
void Destroy();
bool Allocate(DescriptorHandle* handle, u32 count = 1);
void Free(DescriptorHandle* handle, u32 count = 1);
void Free(u32 index, u32 count = 1);
private:
Microsoft::WRL::ComPtr<ID3D12DescriptorHeap> m_descriptor_heap;
u32 m_num_descriptors = 0;
u32 m_descriptor_increment_size = 0;
D3D12_CPU_DESCRIPTOR_HANDLE m_heap_base_cpu = {};
D3D12_GPU_DESCRIPTOR_HANDLE m_heap_base_gpu = {};
static constexpr u32 BITSET_SIZE = 1024;
using BitSetType = std::bitset<BITSET_SIZE>;
std::vector<BitSetType> m_free_slots = {};
};
} // namespace D3D12

View File

@@ -1,461 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "shader_cache.h"
#include "../d3d11/shader_compiler.h"
#include "../file_system.h"
#include "../log.h"
#include "../md5_digest.h"
#include <d3dcompiler.h>
Log_SetChannel(D3D12::ShaderCache);
namespace D3D12 {
#pragma pack(push, 1)
struct CacheIndexEntry
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
u32 shader_type;
u32 file_offset;
u32 blob_size;
};
#pragma pack(pop)
ShaderCache::ShaderCache() = default;
ShaderCache::~ShaderCache()
{
if (m_pipeline_index_file)
std::fclose(m_pipeline_index_file);
if (m_pipeline_blob_file)
std::fclose(m_pipeline_blob_file);
if (m_shader_index_file)
std::fclose(m_shader_index_file);
if (m_shader_blob_file)
std::fclose(m_shader_blob_file);
}
bool ShaderCache::CacheIndexKey::operator==(const CacheIndexKey& key) const
{
return (source_hash_low == key.source_hash_low && source_hash_high == key.source_hash_high &&
source_length == key.source_length && type == key.type);
}
bool ShaderCache::CacheIndexKey::operator!=(const CacheIndexKey& key) const
{
return (source_hash_low != key.source_hash_low || source_hash_high != key.source_hash_high ||
source_length != key.source_length || type != key.type);
}
void ShaderCache::Open(std::string_view base_path, D3D_FEATURE_LEVEL feature_level, bool debug)
{
m_base_path = base_path;
m_feature_level = feature_level;
m_debug = debug;
if (!base_path.empty())
{
const std::string base_shader_filename = GetCacheBaseFileName(base_path, "shaders", feature_level, debug);
const std::string shader_index_filename = base_shader_filename + ".idx";
const std::string shader_blob_filename = base_shader_filename + ".bin";
if (!ReadExisting(shader_index_filename, shader_blob_filename, m_shader_index_file, m_shader_blob_file,
m_shader_index))
{
CreateNew(shader_index_filename, shader_blob_filename, m_shader_index_file, m_shader_blob_file);
}
const std::string base_pipelines_filename = GetCacheBaseFileName(base_path, "pipelines", feature_level, debug);
const std::string pipelines_index_filename = base_pipelines_filename + ".idx";
const std::string pipelines_blob_filename = base_pipelines_filename + ".bin";
if (!ReadExisting(pipelines_index_filename, pipelines_blob_filename, m_pipeline_index_file, m_pipeline_blob_file,
m_pipeline_index))
{
CreateNew(pipelines_index_filename, pipelines_blob_filename, m_pipeline_index_file, m_pipeline_blob_file);
}
}
}
void ShaderCache::InvalidatePipelineCache()
{
m_pipeline_index.clear();
if (m_pipeline_blob_file)
{
std::fclose(m_pipeline_blob_file);
m_pipeline_blob_file = nullptr;
}
if (m_pipeline_index_file)
{
std::fclose(m_pipeline_index_file);
m_pipeline_index_file = nullptr;
}
const std::string base_pipelines_filename = GetCacheBaseFileName(m_base_path, "pipelines", m_feature_level, m_debug);
const std::string pipelines_index_filename = base_pipelines_filename + ".idx";
const std::string pipelines_blob_filename = base_pipelines_filename + ".bin";
CreateNew(pipelines_index_filename, pipelines_blob_filename, m_pipeline_index_file, m_pipeline_blob_file);
}
bool ShaderCache::CreateNew(const std::string& index_filename, const std::string& blob_filename, std::FILE*& index_file,
std::FILE*& blob_file)
{
if (FileSystem::FileExists(index_filename.c_str()))
{
Log_WarningPrintf("Removing existing index file '%s'", index_filename.c_str());
FileSystem::DeleteFile(index_filename.c_str());
}
if (FileSystem::FileExists(blob_filename.c_str()))
{
Log_WarningPrintf("Removing existing blob file '%s'", blob_filename.c_str());
FileSystem::DeleteFile(blob_filename.c_str());
}
index_file = FileSystem::OpenCFile(index_filename.c_str(), "wb");
if (!index_file)
{
Log_ErrorPrintf("Failed to open index file '%s' for writing", index_filename.c_str());
return false;
}
const u32 index_version = FILE_VERSION;
if (std::fwrite(&index_version, sizeof(index_version), 1, index_file) != 1)
{
Log_ErrorPrintf("Failed to write version to index file '%s'", index_filename.c_str());
std::fclose(index_file);
index_file = nullptr;
FileSystem::DeleteFile(index_filename.c_str());
return false;
}
blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "w+b");
if (!blob_file)
{
Log_ErrorPrintf("Failed to open blob file '%s' for writing", blob_filename.c_str());
std::fclose(index_file);
index_file = nullptr;
FileSystem::DeleteFile(index_filename.c_str());
return false;
}
return true;
}
bool ShaderCache::ReadExisting(const std::string& index_filename, const std::string& blob_filename,
std::FILE*& index_file, std::FILE*& blob_file, CacheIndex& index)
{
index_file = FileSystem::OpenCFile(index_filename.c_str(), "r+b");
if (!index_file)
return false;
u32 file_version;
if (std::fread(&file_version, sizeof(file_version), 1, index_file) != 1 || file_version != FILE_VERSION)
{
Log_ErrorPrintf("Bad file version in '%s'", index_filename.c_str());
std::fclose(index_file);
index_file = nullptr;
return false;
}
blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "a+b");
if (!blob_file)
{
Log_ErrorPrintf("Blob file '%s' is missing", blob_filename.c_str());
std::fclose(index_file);
index_file = nullptr;
return false;
}
std::fseek(blob_file, 0, SEEK_END);
const u32 blob_file_size = static_cast<u32>(std::ftell(blob_file));
for (;;)
{
CacheIndexEntry entry;
if (std::fread(&entry, sizeof(entry), 1, index_file) != 1 || (entry.file_offset + entry.blob_size) > blob_file_size)
{
if (std::feof(index_file))
break;
Log_ErrorPrintf("Failed to read entry from '%s', corrupt file?", index_filename.c_str());
index.clear();
std::fclose(blob_file);
blob_file = nullptr;
std::fclose(index_file);
index_file = nullptr;
return false;
}
const CacheIndexKey key{entry.source_hash_low, entry.source_hash_high, entry.source_length,
static_cast<EntryType>(entry.shader_type)};
const CacheIndexData data{entry.file_offset, entry.blob_size};
index.emplace(key, data);
}
// Seek to the end of the index so subsequent writes append rather than overwrite existing entries.
std::fseek(index_file, 0, SEEK_END);
Log_InfoPrintf("Read %zu entries from '%s'", index.size(), index_filename.c_str());
return true;
}
std::string ShaderCache::GetCacheBaseFileName(const std::string_view& base_path, const std::string_view& type,
D3D_FEATURE_LEVEL feature_level, bool debug)
{
std::string base_filename(base_path);
base_filename += FS_OSPATH_SEPARATOR_STR "d3d12_";
base_filename += type;
base_filename += "_";
switch (feature_level)
{
case D3D_FEATURE_LEVEL_10_0:
base_filename += "sm40";
break;
case D3D_FEATURE_LEVEL_10_1:
base_filename += "sm41";
break;
case D3D_FEATURE_LEVEL_11_0:
base_filename += "sm50";
break;
default:
base_filename += "unk";
break;
}
if (debug)
base_filename += "_debug";
return base_filename;
}
union MD5Hash
{
struct
{
u64 low;
u64 high;
};
u8 hash[16];
};
ShaderCache::CacheIndexKey ShaderCache::GetShaderCacheKey(EntryType type, const std::string_view& shader_code)
{
MD5Hash h;
MD5Digest digest;
digest.Update(shader_code.data(), static_cast<u32>(shader_code.length()));
digest.Final(h.hash);
return CacheIndexKey{h.low, h.high, static_cast<u32>(shader_code.length()), type};
}
ShaderCache::CacheIndexKey ShaderCache::GetPipelineCacheKey(const D3D12_GRAPHICS_PIPELINE_STATE_DESC& gpdesc)
{
MD5Digest digest;
u32 length = sizeof(D3D12_GRAPHICS_PIPELINE_STATE_DESC);
if (gpdesc.VS.BytecodeLength > 0)
{
digest.Update(gpdesc.VS.pShaderBytecode, static_cast<u32>(gpdesc.VS.BytecodeLength));
length += static_cast<u32>(gpdesc.VS.BytecodeLength);
}
if (gpdesc.GS.BytecodeLength > 0)
{
digest.Update(gpdesc.GS.pShaderBytecode, static_cast<u32>(gpdesc.GS.BytecodeLength));
length += static_cast<u32>(gpdesc.GS.BytecodeLength);
}
if (gpdesc.PS.BytecodeLength > 0)
{
digest.Update(gpdesc.PS.pShaderBytecode, static_cast<u32>(gpdesc.PS.BytecodeLength));
length += static_cast<u32>(gpdesc.PS.BytecodeLength);
}
digest.Update(&gpdesc.BlendState, sizeof(gpdesc.BlendState));
digest.Update(&gpdesc.SampleMask, sizeof(gpdesc.SampleMask));
digest.Update(&gpdesc.RasterizerState, sizeof(gpdesc.RasterizerState));
digest.Update(&gpdesc.DepthStencilState, sizeof(gpdesc.DepthStencilState));
for (u32 i = 0; i < gpdesc.InputLayout.NumElements; i++)
{
const D3D12_INPUT_ELEMENT_DESC& ie = gpdesc.InputLayout.pInputElementDescs[i];
digest.Update(ie.SemanticName, static_cast<u32>(std::strlen(ie.SemanticName)));
digest.Update(&ie.SemanticIndex, sizeof(ie.SemanticIndex));
digest.Update(&ie.Format, sizeof(ie.Format));
digest.Update(&ie.InputSlot, sizeof(ie.InputSlot));
digest.Update(&ie.AlignedByteOffset, sizeof(ie.AlignedByteOffset));
digest.Update(&ie.InputSlotClass, sizeof(ie.InputSlotClass));
digest.Update(&ie.InstanceDataStepRate, sizeof(ie.InstanceDataStepRate));
length += sizeof(D3D12_INPUT_ELEMENT_DESC);
}
digest.Update(&gpdesc.IBStripCutValue, sizeof(gpdesc.IBStripCutValue));
digest.Update(&gpdesc.PrimitiveTopologyType, sizeof(gpdesc.PrimitiveTopologyType));
digest.Update(&gpdesc.NumRenderTargets, sizeof(gpdesc.NumRenderTargets));
digest.Update(gpdesc.RTVFormats, sizeof(gpdesc.RTVFormats));
digest.Update(&gpdesc.DSVFormat, sizeof(gpdesc.DSVFormat));
digest.Update(&gpdesc.SampleDesc, sizeof(gpdesc.SampleDesc));
digest.Update(&gpdesc.Flags, sizeof(gpdesc.Flags));
MD5Hash h;
digest.Final(h.hash);
return CacheIndexKey{h.low, h.high, length, EntryType::GraphicsPipeline};
}
ShaderCache::ComPtr<ID3DBlob> ShaderCache::GetShaderBlob(EntryType type, std::string_view shader_code)
{
const auto key = GetShaderCacheKey(type, shader_code);
auto iter = m_shader_index.find(key);
if (iter == m_shader_index.end())
return CompileAndAddShaderBlob(key, shader_code);
ComPtr<ID3DBlob> blob;
HRESULT hr = D3DCreateBlob(iter->second.blob_size, blob.GetAddressOf());
if (FAILED(hr) || std::fseek(m_shader_blob_file, iter->second.file_offset, SEEK_SET) != 0 ||
std::fread(blob->GetBufferPointer(), 1, iter->second.blob_size, m_shader_blob_file) != iter->second.blob_size)
{
Log_ErrorPrintf("Read blob from file failed");
return {};
}
return blob;
}
ShaderCache::ComPtr<ID3D12PipelineState> ShaderCache::GetPipelineState(ID3D12Device* device,
const D3D12_GRAPHICS_PIPELINE_STATE_DESC& desc)
{
const auto key = GetPipelineCacheKey(desc);
auto iter = m_pipeline_index.find(key);
if (iter == m_pipeline_index.end())
return CompileAndAddPipeline(device, key, desc);
ComPtr<ID3DBlob> blob;
HRESULT hr = D3DCreateBlob(iter->second.blob_size, blob.GetAddressOf());
if (FAILED(hr) || std::fseek(m_pipeline_blob_file, iter->second.file_offset, SEEK_SET) != 0 ||
std::fread(blob->GetBufferPointer(), 1, iter->second.blob_size, m_pipeline_blob_file) != iter->second.blob_size)
{
Log_ErrorPrintf("Read blob from file failed");
return {};
}
D3D12_GRAPHICS_PIPELINE_STATE_DESC desc_with_blob(desc);
desc_with_blob.CachedPSO.pCachedBlob = blob->GetBufferPointer();
desc_with_blob.CachedPSO.CachedBlobSizeInBytes = blob->GetBufferSize();
ComPtr<ID3D12PipelineState> pso;
hr = device->CreateGraphicsPipelineState(&desc_with_blob, IID_PPV_ARGS(pso.GetAddressOf()));
if (FAILED(hr))
{
Log_WarningPrintf("Creating cached PSO failed: %08X. Invalidating cache.", hr);
InvalidatePipelineCache();
pso = CompileAndAddPipeline(device, key, desc);
}
return pso;
}
ShaderCache::ComPtr<ID3DBlob> ShaderCache::CompileAndAddShaderBlob(const CacheIndexKey& key,
std::string_view shader_code)
{
ComPtr<ID3DBlob> blob;
switch (key.type)
{
case EntryType::VertexShader:
blob = D3D11::ShaderCompiler::CompileShader(D3D11::ShaderCompiler::Type::Vertex, m_feature_level, shader_code,
m_debug);
break;
case EntryType::GeometryShader:
blob = D3D11::ShaderCompiler::CompileShader(D3D11::ShaderCompiler::Type::Geometry, m_feature_level, shader_code,
m_debug);
break;
case EntryType::PixelShader:
blob =
D3D11::ShaderCompiler::CompileShader(D3D11::ShaderCompiler::Type::Pixel, m_feature_level, shader_code, m_debug);
break;
default:
break;
}
if (!blob)
return {};
if (!m_shader_blob_file || std::fseek(m_shader_blob_file, 0, SEEK_END) != 0)
return blob;
CacheIndexData data;
data.file_offset = static_cast<u32>(std::ftell(m_shader_blob_file));
data.blob_size = static_cast<u32>(blob->GetBufferSize());
CacheIndexEntry entry = {};
entry.source_hash_low = key.source_hash_low;
entry.source_hash_high = key.source_hash_high;
entry.source_length = key.source_length;
entry.shader_type = static_cast<u32>(key.type);
entry.blob_size = data.blob_size;
entry.file_offset = data.file_offset;
if (std::fwrite(blob->GetBufferPointer(), 1, entry.blob_size, m_shader_blob_file) != entry.blob_size ||
std::fflush(m_shader_blob_file) != 0 || std::fwrite(&entry, sizeof(entry), 1, m_shader_index_file) != 1 ||
std::fflush(m_shader_index_file) != 0)
{
Log_ErrorPrintf("Failed to write shader blob to file");
return blob;
}
m_shader_index.emplace(key, data);
return blob;
}
ShaderCache::ComPtr<ID3D12PipelineState>
ShaderCache::CompileAndAddPipeline(ID3D12Device* device, const CacheIndexKey& key,
const D3D12_GRAPHICS_PIPELINE_STATE_DESC& gpdesc)
{
ComPtr<ID3D12PipelineState> pso;
HRESULT hr = device->CreateGraphicsPipelineState(&gpdesc, IID_PPV_ARGS(pso.GetAddressOf()));
if (FAILED(hr))
{
Log_ErrorPrintf("Creating cached PSO failed: %08X", hr);
return {};
}
if (!m_pipeline_blob_file || std::fseek(m_pipeline_blob_file, 0, SEEK_END) != 0)
return pso;
ComPtr<ID3DBlob> blob;
hr = pso->GetCachedBlob(blob.GetAddressOf());
if (FAILED(hr))
{
Log_WarningPrintf("Failed to get cached PSO data: %08X", hr);
return pso;
}
CacheIndexData data;
data.file_offset = static_cast<u32>(std::ftell(m_pipeline_blob_file));
data.blob_size = static_cast<u32>(blob->GetBufferSize());
CacheIndexEntry entry = {};
entry.source_hash_low = key.source_hash_low;
entry.source_hash_high = key.source_hash_high;
entry.source_length = key.source_length;
entry.shader_type = static_cast<u32>(key.type);
entry.blob_size = data.blob_size;
entry.file_offset = data.file_offset;
if (std::fwrite(blob->GetBufferPointer(), 1, entry.blob_size, m_pipeline_blob_file) != entry.blob_size ||
std::fflush(m_pipeline_blob_file) != 0 || std::fwrite(&entry, sizeof(entry), 1, m_pipeline_index_file) != 1 ||
std::fflush(m_pipeline_index_file) != 0)
{
Log_ErrorPrintf("Failed to write pipeline blob to file");
return pso;
}
m_pipeline_index.emplace(key, data);
return pso;
}
} // namespace D3D12
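// Sketch of how this cache was typically driven once the d3d12 context existed; "cache_dir", the
// HLSL source strings and "root_signature" are placeholders.
D3D12::ShaderCache shader_cache;
shader_cache.Open(cache_dir, g_d3d12_context->GetFeatureLevel(), /*debug=*/false);
Microsoft::WRL::ComPtr<ID3DBlob> vs = shader_cache.GetVertexShader(vs_hlsl);
Microsoft::WRL::ComPtr<ID3DBlob> ps = shader_cache.GetPixelShader(ps_hlsl);
if (!vs || !ps)
return {};
D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = {};
desc.pRootSignature = root_signature.Get();
desc.VS = {vs->GetBufferPointer(), vs->GetBufferSize()};
desc.PS = {ps->GetBufferPointer(), ps->GetBufferSize()};
// ... fill blend/rasterizer/depth state, input layout, topology and RTV formats ...
Microsoft::WRL::ComPtr<ID3D12PipelineState> pso = shader_cache.GetPipelineState(g_d3d12_context->GetDevice(), desc);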

View File

@@ -1,120 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../hash_combine.h"
#include "../types.h"
#include "../windows_headers.h"
#include <cstdio>
#include <d3d12.h>
#include <string_view>
#include <unordered_map>
#include <vector>
#include <wrl/client.h>
namespace D3D12 {
class ShaderCache
{
public:
template<typename T>
using ComPtr = Microsoft::WRL::ComPtr<T>;
enum class EntryType
{
VertexShader,
GeometryShader,
PixelShader,
ComputeShader,
GraphicsPipeline,
};
ShaderCache();
~ShaderCache();
void Open(std::string_view base_path, D3D_FEATURE_LEVEL feature_level, bool debug);
ALWAYS_INLINE ComPtr<ID3DBlob> GetVertexShader(std::string_view shader_code)
{
return GetShaderBlob(EntryType::VertexShader, shader_code);
}
ALWAYS_INLINE ComPtr<ID3DBlob> GetGeometryShader(std::string_view shader_code)
{
return GetShaderBlob(EntryType::GeometryShader, shader_code);
}
ALWAYS_INLINE ComPtr<ID3DBlob> GetPixelShader(std::string_view shader_code)
{
return GetShaderBlob(EntryType::PixelShader, shader_code);
}
ALWAYS_INLINE ComPtr<ID3DBlob> GetComputeShader(std::string_view shader_code)
{
return GetShaderBlob(EntryType::ComputeShader, shader_code);
}
ComPtr<ID3DBlob> GetShaderBlob(EntryType type, std::string_view shader_code);
ComPtr<ID3D12PipelineState> GetPipelineState(ID3D12Device* device, const D3D12_GRAPHICS_PIPELINE_STATE_DESC& desc);
private:
static constexpr u32 FILE_VERSION = 1;
struct CacheIndexKey
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
EntryType type;
bool operator==(const CacheIndexKey& key) const;
bool operator!=(const CacheIndexKey& key) const;
};
struct CacheIndexEntryHasher
{
std::size_t operator()(const CacheIndexKey& e) const noexcept
{
std::size_t h = 0;
hash_combine(h, e.source_hash_low, e.source_hash_high, e.source_length, e.type);
return h;
}
};
struct CacheIndexData
{
u32 file_offset;
u32 blob_size;
};
using CacheIndex = std::unordered_map<CacheIndexKey, CacheIndexData, CacheIndexEntryHasher>;
static std::string GetCacheBaseFileName(const std::string_view& base_path, const std::string_view& type,
D3D_FEATURE_LEVEL feature_level, bool debug);
static CacheIndexKey GetShaderCacheKey(EntryType type, const std::string_view& shader_code);
static CacheIndexKey GetPipelineCacheKey(const D3D12_GRAPHICS_PIPELINE_STATE_DESC& gpdesc);
bool CreateNew(const std::string& index_filename, const std::string& blob_filename, std::FILE*& index_file,
std::FILE*& blob_file);
bool ReadExisting(const std::string& index_filename, const std::string& blob_filename, std::FILE*& index_file,
std::FILE*& blob_file, CacheIndex& index);
void InvalidatePipelineCache();
void Close();
ComPtr<ID3DBlob> CompileAndAddShaderBlob(const CacheIndexKey& key, std::string_view shader_code);
ComPtr<ID3D12PipelineState> CompileAndAddPipeline(ID3D12Device* device, const CacheIndexKey& key,
const D3D12_GRAPHICS_PIPELINE_STATE_DESC& gpdesc);
std::string m_base_path;
std::FILE* m_shader_index_file = nullptr;
std::FILE* m_shader_blob_file = nullptr;
CacheIndex m_shader_index;
std::FILE* m_pipeline_index_file = nullptr;
std::FILE* m_pipeline_blob_file = nullptr;
CacheIndex m_pipeline_index;
D3D_FEATURE_LEVEL m_feature_level = D3D_FEATURE_LEVEL_11_0;
bool m_debug = false;
};
} // namespace D3D12

View File

@@ -1,239 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "staging_texture.h"
#include "../align.h"
#include "../assert.h"
#include "../log.h"
#include "context.h"
#include "util.h"
Log_SetChannel(D3D12);
namespace D3D12 {
StagingTexture::StagingTexture() : m_width(0), m_height(0) {}
StagingTexture::~StagingTexture()
{
Destroy();
}
bool StagingTexture::Create(u32 width, u32 height, DXGI_FORMAT format, bool for_uploading)
{
const u32 texel_size = GetTexelSize(format);
const u32 row_pitch = Common::AlignUpPow2(width * texel_size, D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
const u32 buffer_size = height * row_pitch;
const D3D12_HEAP_PROPERTIES heap_properties = {for_uploading ? D3D12_HEAP_TYPE_UPLOAD : D3D12_HEAP_TYPE_READBACK};
D3D12_RESOURCE_DESC desc = {};
desc.Dimension = D3D12_RESOURCE_DIMENSION_BUFFER;
desc.Width = buffer_size;
desc.Height = 1;
desc.DepthOrArraySize = 1;
desc.MipLevels = 1;
desc.Format = DXGI_FORMAT_UNKNOWN;
desc.SampleDesc.Count = 1;
desc.Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR;
desc.Flags = D3D12_RESOURCE_FLAG_NONE;
D3D12_RESOURCE_STATES state = for_uploading ? D3D12_RESOURCE_STATE_GENERIC_READ : D3D12_RESOURCE_STATE_COPY_DEST;
ComPtr<ID3D12Resource> resource;
HRESULT hr = g_d3d12_context->GetDevice()->CreateCommittedResource(
&heap_properties, D3D12_HEAP_FLAG_NONE, &desc, state, nullptr, IID_PPV_ARGS(resource.GetAddressOf()));
if (FAILED(hr))
{
Log_ErrorPrintf("Create buffer failed: 0x%08X", hr);
return false;
}
Destroy(true);
m_resource = std::move(resource);
m_width = width;
m_height = height;
m_format = format;
m_buffer_size = buffer_size;
m_row_pitch = row_pitch;
m_texel_size = texel_size;
return true;
}
void StagingTexture::Destroy(bool defer)
{
if (IsMapped())
Unmap();
if (m_resource && defer)
g_d3d12_context->DeferResourceDestruction(m_resource.Get());
m_resource.Reset();
m_width = 0;
m_height = 0;
m_format = DXGI_FORMAT_UNKNOWN;
m_buffer_size = 0;
m_row_pitch = 0;
m_texel_size = 0;
}
bool StagingTexture::Map(bool writing)
{
D3D12_RANGE range{0u, m_buffer_size};
Assert(!IsMapped());
const HRESULT hr = m_resource->Map(0, writing ? nullptr : &range, &m_mapped_pointer);
if (FAILED(hr))
{
Log_ErrorPrintf("Map staging buffer failed: 0x%08X", hr);
return false;
}
m_mapped_for_write = writing;
return true;
}
void StagingTexture::Unmap()
{
Assert(IsMapped());
D3D12_RANGE range{0u, m_buffer_size};
m_resource->Unmap(0, m_mapped_for_write ? &range : nullptr);
m_mapped_pointer = nullptr;
m_mapped_for_write = false;
}
void StagingTexture::Flush()
{
if (!m_needs_flush)
return;
m_needs_flush = false;
// If the fence value recorded for the copy matches the current command list's fence, the copy
// hasn't been submitted yet, so we have to execute the current list and wait for it to complete.
// This is the slowest path. Otherwise, the command list containing the copy has already been
// submitted, and we only need to wait for its fence.
if (m_completed_fence == g_d3d12_context->GetCurrentFenceValue())
g_d3d12_context->ExecuteCommandList(true);
else
g_d3d12_context->WaitForFence(m_completed_fence);
}
void StagingTexture::CopyToTexture(u32 src_x, u32 src_y, ID3D12Resource* dst_texture, u32 dst_subresource, u32 dst_x,
u32 dst_y, u32 width, u32 height)
{
DebugAssert((src_x + width) <= m_width && (src_y + height) <= m_height);
D3D12_TEXTURE_COPY_LOCATION dst;
dst.pResource = dst_texture;
dst.SubresourceIndex = 0;
dst.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
D3D12_TEXTURE_COPY_LOCATION src;
src.pResource = m_resource.Get();
src.SubresourceIndex = 0;
src.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
src.PlacedFootprint.Offset = 0;
src.PlacedFootprint.Footprint.Width = m_width;
src.PlacedFootprint.Footprint.Height = m_height;
src.PlacedFootprint.Footprint.Depth = 1;
src.PlacedFootprint.Footprint.Format = m_format;
src.PlacedFootprint.Footprint.RowPitch = m_row_pitch;
const D3D12_BOX src_box{src_x, src_y, 0u, src_x + width, src_y + height, 1u};
g_d3d12_context->GetCommandList()->CopyTextureRegion(&dst, dst_x, dst_y, 0, &src, &src_box);
}
void StagingTexture::CopyFromTexture(ID3D12Resource* src_texture, u32 src_subresource, u32 src_x, u32 src_y, u32 dst_x,
u32 dst_y, u32 width, u32 height)
{
DebugAssert((dst_x + width) <= m_width && (dst_y + height) <= m_height);
D3D12_TEXTURE_COPY_LOCATION src;
src.pResource = src_texture;
src.SubresourceIndex = 0;
src.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
D3D12_TEXTURE_COPY_LOCATION dst;
dst.pResource = m_resource.Get();
dst.SubresourceIndex = 0;
dst.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
dst.PlacedFootprint.Offset = 0;
dst.PlacedFootprint.Footprint.Width = m_width;
dst.PlacedFootprint.Footprint.Height = m_height;
dst.PlacedFootprint.Footprint.Depth = 1;
dst.PlacedFootprint.Footprint.Format = m_format;
dst.PlacedFootprint.Footprint.RowPitch = m_row_pitch;
const D3D12_BOX src_box{src_x, src_y, 0u, src_x + width, src_y + height, 1u};
g_d3d12_context->GetCommandList()->CopyTextureRegion(&dst, dst_x, dst_y, 0, &src, &src_box);
m_completed_fence = g_d3d12_context->GetCurrentFenceValue();
m_needs_flush = true;
}
bool StagingTexture::ReadPixels(u32 x, u32 y, u32 width, u32 height, void* data, u32 row_pitch)
{
if (m_needs_flush)
Flush();
const bool was_mapped = IsMapped();
if (!was_mapped && !Map(false))
return false;
const u8* src_ptr = static_cast<u8*>(m_mapped_pointer) + (y * m_row_pitch) + (x * m_texel_size);
u8* dst_ptr = reinterpret_cast<u8*>(data);
if (m_row_pitch != row_pitch || width != m_width || x != 0)
{
const u32 copy_size = m_texel_size * width;
for (u32 row = 0; row < height; row++)
{
std::memcpy(dst_ptr, src_ptr, copy_size);
src_ptr += m_row_pitch;
dst_ptr += row_pitch;
}
}
else
{
std::memcpy(dst_ptr, src_ptr, row_pitch * height);
}
return true;
}
bool StagingTexture::WritePixels(u32 x, u32 y, u32 width, u32 height, const void* data, u32 row_pitch)
{
const bool was_mapped = IsMapped();
if (!was_mapped && !Map(true))
return false;
const u8* src_ptr = reinterpret_cast<const u8*>(data);
u8* dst_ptr = static_cast<u8*>(m_mapped_pointer) + (y * m_row_pitch) + (x * m_texel_size);
if (m_row_pitch != row_pitch || width != m_width || x != 0)
{
const u32 copy_size = m_texel_size * width;
for (u32 row = 0; row < height; row++)
{
std::memcpy(dst_ptr, src_ptr, copy_size);
src_ptr += row_pitch;
dst_ptr += m_row_pitch;
}
}
else
{
std::memcpy(dst_ptr, src_ptr, m_row_pitch * height);
}
if (!was_mapped)
Unmap();
return true;
}
bool StagingTexture::EnsureSize(u32 width, u32 height, DXGI_FORMAT format, bool for_uploading)
{
if (m_resource && m_width >= width && m_height >= height && m_format == format)
return true;
return Create(width, height, format, for_uploading);
}
} // namespace D3D12
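// Sketch of the readback path this class provided: record a copy out of a GPU texture, flush the
// fence, then read rows back on the CPU. "src_resource" (an ID3D12Resource* in the COPY_SOURCE
// state) and "pixels" are placeholders.
D3D12::StagingTexture staging;
if (!staging.EnsureSize(width, height, DXGI_FORMAT_R8G8B8A8_UNORM, /*for_uploading=*/false))
return false;
staging.CopyFromTexture(src_resource, /*src_subresource=*/0, 0, 0, 0, 0, width, height);
staging.Flush(); // waits on the fence recorded by CopyFromTexture
staging.ReadPixels(0, 0, width, height, pixels.data(), width * sizeof(u32));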

View File

@@ -1,65 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../types.h"
#include "../windows_headers.h"
#include <cstring>
#include <d3d12.h>
#include <wrl/client.h>
namespace D3D12 {
class StagingTexture
{
public:
template<typename T>
using ComPtr = Microsoft::WRL::ComPtr<T>;
StagingTexture();
~StagingTexture();
ALWAYS_INLINE ID3D12Resource* GetD3DResource() const { return m_resource.Get(); }
ALWAYS_INLINE u32 GetWidth() const { return m_width; }
ALWAYS_INLINE u32 GetHeight() const { return m_height; }
ALWAYS_INLINE DXGI_FORMAT GetFormat() const { return m_format; }
ALWAYS_INLINE bool IsMapped() const { return m_mapped_pointer != nullptr; }
ALWAYS_INLINE const void* GetMapPointer() const { return m_mapped_pointer; }
ALWAYS_INLINE operator bool() const { return static_cast<bool>(m_resource); }
bool Create(u32 width, u32 height, DXGI_FORMAT format, bool for_uploading);
void Destroy(bool defer = true);
bool Map(bool writing);
void Unmap();
void Flush();
void CopyToTexture(u32 src_x, u32 src_y, ID3D12Resource* dst_texture, u32 dst_subresource, u32 dst_x, u32 dst_y,
u32 width, u32 height);
void CopyFromTexture(ID3D12Resource* src_texture, u32 src_subresource, u32 src_x, u32 src_y, u32 dst_x, u32 dst_y,
u32 width, u32 height);
bool ReadPixels(u32 x, u32 y, u32 width, u32 height, void* data, u32 row_pitch);
bool WritePixels(u32 x, u32 y, u32 width, u32 height, const void* data, u32 row_pitch);
bool EnsureSize(u32 width, u32 height, DXGI_FORMAT format, bool for_uploading);
protected:
ComPtr<ID3D12Resource> m_resource;
u32 m_width = 0;
u32 m_height = 0;
DXGI_FORMAT m_format = DXGI_FORMAT_UNKNOWN;
u32 m_texel_size = 0;
u32 m_row_pitch = 0;
u32 m_buffer_size = 0;
void* m_mapped_pointer = nullptr;
u64 m_completed_fence = 0;
bool m_mapped_for_write = false;
bool m_needs_flush = false;
};
} // namespace D3D12
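// Upload-side counterpart to the readback sketch above: write CPU pixels into the staging buffer,
// then record a copy into a destination texture the caller has already transitioned to COPY_DEST;
// "dst_resource" and "pixels" are placeholders.
D3D12::StagingTexture upload;
if (!upload.EnsureSize(width, height, DXGI_FORMAT_R8G8B8A8_UNORM, /*for_uploading=*/true))
return false;
upload.WritePixels(0, 0, width, height, pixels.data(), width * sizeof(u32));
upload.CopyToTexture(0, 0, dst_resource, /*dst_subresource=*/0, 0, 0, width, height);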

View File

@@ -1,437 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "texture.h"
#include "../align.h"
#include "../assert.h"
#include "../log.h"
#include "context.h"
#include "staging_texture.h"
#include "stream_buffer.h"
#include "util.h"
Log_SetChannel(D3D12);
static constexpr std::array<DXGI_FORMAT, static_cast<u32>(GPUTexture::Format::Count)> s_dxgi_mapping = {
{DXGI_FORMAT_UNKNOWN, DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B5G6R5_UNORM,
DXGI_FORMAT_B5G5R5A1_UNORM, DXGI_FORMAT_R8_UNORM, DXGI_FORMAT_D16_UNORM}};
D3D12::Texture::Texture() = default;
D3D12::Texture::Texture(ID3D12Resource* resource, D3D12_RESOURCE_STATES state) : m_resource(resource)
{
m_state = state;
const D3D12_RESOURCE_DESC desc = GetDesc();
m_width = static_cast<u16>(desc.Width);
m_height = static_cast<u16>(desc.Height);
m_layers = static_cast<u8>(desc.DepthOrArraySize);
m_levels = static_cast<u8>(desc.MipLevels);
m_samples = static_cast<u8>(desc.SampleDesc.Count);
m_format = LookupBaseFormat(desc.Format);
}
D3D12::Texture::Texture(Texture&& texture)
: m_resource(std::move(texture.m_resource)), m_srv_descriptor(texture.m_srv_descriptor),
  m_rtv_or_dsv_descriptor(texture.m_rtv_or_dsv_descriptor), m_state(texture.m_state),
  m_is_depth_view(texture.m_is_depth_view)
{
m_width = texture.m_width;
m_height = texture.m_height;
m_layers = texture.m_layers;
m_levels = texture.m_levels;
m_samples = texture.m_samples;
texture.m_srv_descriptor = {};
texture.m_rtv_or_dsv_descriptor = {};
texture.m_state = D3D12_RESOURCE_STATE_COMMON;
texture.m_is_depth_view = false;
texture.ClearBaseProperties();
}
DXGI_FORMAT D3D12::Texture::GetDXGIFormat(Format format)
{
return s_dxgi_mapping[static_cast<u8>(format)];
}
GPUTexture::Format D3D12::Texture::LookupBaseFormat(DXGI_FORMAT dformat)
{
for (u32 i = 0; i < static_cast<u32>(s_dxgi_mapping.size()); i++)
{
if (s_dxgi_mapping[i] == dformat)
return static_cast<Format>(i);
}
return GPUTexture::Format::Unknown;
}
D3D12::Texture::~Texture()
{
Destroy();
}
D3D12::Texture& D3D12::Texture::operator=(Texture&& texture)
{
Destroy();
m_width = texture.m_width;
m_height = texture.m_height;
m_layers = texture.m_layers;
m_levels = texture.m_levels;
m_samples = texture.m_samples;
m_resource = std::move(texture.m_resource);
m_srv_descriptor = texture.m_srv_descriptor;
m_rtv_or_dsv_descriptor = texture.m_rtv_or_dsv_descriptor;
m_state = texture.m_state;
m_is_depth_view = texture.m_is_depth_view;
texture.ClearBaseProperties();
texture.m_srv_descriptor = {};
texture.m_rtv_or_dsv_descriptor = {};
texture.m_state = D3D12_RESOURCE_STATE_COMMON;
texture.m_is_depth_view = false;
return *this;
}
D3D12_RESOURCE_DESC D3D12::Texture::GetDesc() const
{
return m_resource->GetDesc();
}
bool D3D12::Texture::IsValid() const
{
return static_cast<bool>(m_resource);
}
bool D3D12::Texture::Create(u32 width, u32 height, u32 layers, u32 levels, u32 samples, DXGI_FORMAT format,
DXGI_FORMAT srv_format, DXGI_FORMAT rtv_format, DXGI_FORMAT dsv_format,
D3D12_RESOURCE_FLAGS flags)
{
constexpr D3D12_HEAP_PROPERTIES heap_properties = {D3D12_HEAP_TYPE_DEFAULT};
if (width > MAX_WIDTH || height > MAX_HEIGHT || layers > MAX_LAYERS || levels > MAX_LEVELS || samples > MAX_SAMPLES)
{
Log_ErrorPrintf("Invalid dimensions: %ux%ux%u %u %u", width, height, layers, levels, samples);
return false;
}
D3D12_RESOURCE_DESC desc = {};
desc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
desc.Width = width;
desc.Height = static_cast<u16>(height);
desc.DepthOrArraySize = static_cast<u16>(layers);
desc.MipLevels = static_cast<u16>(levels);
desc.Format = format;
desc.SampleDesc.Count = samples;
desc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
desc.Flags = flags;
D3D12_CLEAR_VALUE optimized_clear_value = {};
D3D12_RESOURCE_STATES state;
if (rtv_format != DXGI_FORMAT_UNKNOWN)
{
optimized_clear_value.Format = rtv_format;
state = D3D12_RESOURCE_STATE_RENDER_TARGET;
}
else if (dsv_format != DXGI_FORMAT_UNKNOWN)
{
optimized_clear_value.Format = dsv_format;
state = D3D12_RESOURCE_STATE_DEPTH_WRITE;
}
else
{
state = D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE;
}
ComPtr<ID3D12Resource> resource;
HRESULT hr = g_d3d12_context->GetDevice()->CreateCommittedResource(
&heap_properties, D3D12_HEAP_FLAG_NONE, &desc, state,
(rtv_format != DXGI_FORMAT_UNKNOWN || dsv_format != DXGI_FORMAT_UNKNOWN) ? &optimized_clear_value : nullptr,
IID_PPV_ARGS(resource.GetAddressOf()));
if (FAILED(hr))
{
Log_ErrorPrintf("Create texture failed: 0x%08X", hr);
return false;
}
DescriptorHandle srv_descriptor, rtv_descriptor;
bool is_depth_view = false;
if (srv_format != DXGI_FORMAT_UNKNOWN)
{
if (!CreateSRVDescriptor(resource.Get(), srv_format, samples > 1, &srv_descriptor))
return false;
}
if (rtv_format != DXGI_FORMAT_UNKNOWN)
{
Assert(dsv_format == DXGI_FORMAT_UNKNOWN);
if (!CreateRTVDescriptor(resource.Get(), rtv_format, samples > 1, &rtv_descriptor))
{
g_d3d12_context->GetDescriptorHeapManager().Free(&srv_descriptor);
return false;
}
}
else if (dsv_format != DXGI_FORMAT_UNKNOWN)
{
if (!CreateDSVDescriptor(resource.Get(), dsv_format, samples > 1, &rtv_descriptor))
{
g_d3d12_context->GetDescriptorHeapManager().Free(&srv_descriptor);
return false;
}
is_depth_view = true;
}
Destroy(true);
m_resource = std::move(resource);
m_srv_descriptor = std::move(srv_descriptor);
m_rtv_or_dsv_descriptor = std::move(rtv_descriptor);
m_width = static_cast<u16>(width);
m_height = static_cast<u16>(height);
m_layers = static_cast<u8>(layers);
m_levels = static_cast<u8>(levels);
m_samples = static_cast<u8>(samples);
m_format = LookupBaseFormat(format);
m_state = state;
m_is_depth_view = is_depth_view;
return true;
}
bool D3D12::Texture::Adopt(ComPtr<ID3D12Resource> texture, DXGI_FORMAT srv_format, DXGI_FORMAT rtv_format,
DXGI_FORMAT dsv_format, D3D12_RESOURCE_STATES state)
{
const D3D12_RESOURCE_DESC desc(texture->GetDesc());
DescriptorHandle srv_descriptor, rtv_descriptor;
if (srv_format != DXGI_FORMAT_UNKNOWN)
{
if (!CreateSRVDescriptor(texture.Get(), srv_format, desc.SampleDesc.Count > 1, &srv_descriptor))
return false;
}
m_is_depth_view = false;
if (rtv_format != DXGI_FORMAT_UNKNOWN)
{
Assert(dsv_format == DXGI_FORMAT_UNKNOWN);
if (!CreateRTVDescriptor(texture.Get(), rtv_format, desc.SampleDesc.Count > 1, &rtv_descriptor))
{
g_d3d12_context->GetDescriptorHeapManager().Free(&srv_descriptor);
return false;
}
}
else if (dsv_format != DXGI_FORMAT_UNKNOWN)
{
if (!CreateDSVDescriptor(texture.Get(), dsv_format, desc.SampleDesc.Count > 1, &rtv_descriptor))
{
g_d3d12_context->GetDescriptorHeapManager().Free(&srv_descriptor);
return false;
}
m_is_depth_view = true;
}
m_resource = std::move(texture);
m_srv_descriptor = std::move(srv_descriptor);
m_rtv_or_dsv_descriptor = std::move(rtv_descriptor);
m_width = static_cast<u16>(desc.Width);
m_height = static_cast<u16>(desc.Height);
m_layers = static_cast<u8>(desc.DepthOrArraySize);
m_levels = static_cast<u8>(desc.MipLevels);
m_samples = static_cast<u8>(desc.SampleDesc.Count);
m_format = LookupBaseFormat(desc.Format);
m_state = state;
return true;
}
void D3D12::Texture::Destroy(bool defer /* = true */)
{
if (defer)
{
g_d3d12_context->DeferDescriptorDestruction(g_d3d12_context->GetDescriptorHeapManager(), &m_srv_descriptor);
if (m_is_depth_view)
g_d3d12_context->DeferDescriptorDestruction(g_d3d12_context->GetDSVHeapManager(), &m_rtv_or_dsv_descriptor);
else
g_d3d12_context->DeferDescriptorDestruction(g_d3d12_context->GetRTVHeapManager(), &m_rtv_or_dsv_descriptor);
g_d3d12_context->DeferResourceDestruction(m_resource.Get());
m_resource.Reset();
}
else
{
g_d3d12_context->GetDescriptorHeapManager().Free(&m_srv_descriptor);
if (m_is_depth_view)
g_d3d12_context->GetDSVHeapManager().Free(&m_rtv_or_dsv_descriptor);
else
g_d3d12_context->GetRTVHeapManager().Free(&m_rtv_or_dsv_descriptor);
m_resource.Reset();
}
ClearBaseProperties();
m_is_depth_view = false;
}
void D3D12::Texture::TransitionToState(D3D12_RESOURCE_STATES state) const
{
if (m_state == state)
return;
ResourceBarrier(g_d3d12_context->GetCommandList(), m_resource.Get(), m_state, state);
m_state = state;
}
bool D3D12::Texture::BeginStreamUpdate(u32 x, u32 y, u32 width, u32 height, void** out_data, u32* out_data_pitch)
{
const u32 copy_pitch = Common::AlignUpPow2(width * GetPixelSize(), D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
const u32 upload_size = copy_pitch * height;
if (!g_d3d12_context->GetTextureStreamBuffer().ReserveMemory(upload_size, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT))
{
Log_PerfPrintf("Executing command buffer while waiting for %u bytes (%ux%u) in upload buffer", upload_size, width,
height);
g_d3d12_context->ExecuteCommandList(false);
if (!g_d3d12_context->GetTextureStreamBuffer().ReserveMemory(upload_size, D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT))
{
Log_ErrorPrintf("Failed to reserve %u bytes for %ux%u upload", upload_size, width, height);
return false;
}
}
*out_data = g_d3d12_context->GetTextureStreamBuffer().GetCurrentHostPointer();
*out_data_pitch = copy_pitch;
return true;
}
void D3D12::Texture::EndStreamUpdate(u32 x, u32 y, u32 width, u32 height)
{
const u32 copy_pitch = Common::AlignUpPow2(width * GetPixelSize(), D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
const u32 upload_size = copy_pitch * height;
StreamBuffer& sb = g_d3d12_context->GetTextureStreamBuffer();
const u32 sb_offset = sb.GetCurrentOffset();
sb.CommitMemory(upload_size);
CopyFromBuffer(x, y, width, height, copy_pitch, sb.GetBuffer(), sb_offset);
}
void D3D12::Texture::CopyFromBuffer(u32 x, u32 y, u32 width, u32 height, u32 pitch, ID3D12Resource* buffer,
u32 buffer_offset)
{
D3D12_TEXTURE_COPY_LOCATION src;
src.pResource = buffer;
src.SubresourceIndex = 0;
src.Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT;
src.PlacedFootprint.Offset = buffer_offset;
src.PlacedFootprint.Footprint.Width = width;
src.PlacedFootprint.Footprint.Height = height;
src.PlacedFootprint.Footprint.Depth = 1;
src.PlacedFootprint.Footprint.RowPitch = pitch;
src.PlacedFootprint.Footprint.Format = GetDXGIFormat();
D3D12_TEXTURE_COPY_LOCATION dst;
dst.pResource = m_resource.Get();
dst.SubresourceIndex = 0;
dst.Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX;
const D3D12_BOX src_box{0u, 0u, 0u, width, height, 1u};
const D3D12_RESOURCE_STATES old_state = m_state;
TransitionToState(D3D12_RESOURCE_STATE_COPY_DEST);
g_d3d12_context->GetCommandList()->CopyTextureRegion(&dst, x, y, 0, &src, &src_box);
TransitionToState(old_state);
}
bool D3D12::Texture::LoadData(u32 x, u32 y, u32 width, u32 height, const void* data, u32 pitch)
{
const u32 texel_size = GetPixelSize();
const u32 upload_pitch = Common::AlignUpPow2(width * texel_size, D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
const u32 upload_size = upload_pitch * height;
if (upload_size >= g_d3d12_context->GetTextureStreamBuffer().GetSize())
{
StagingTexture st;
if (!st.Create(width, height, GetDXGIFormat(), true) || !st.WritePixels(0, 0, width, height, data, pitch))
return false;
D3D12_RESOURCE_STATES old_state = m_state;
TransitionToState(D3D12_RESOURCE_STATE_COPY_DEST);
st.CopyToTexture(0, 0, m_resource.Get(), 0, x, y, width, height);
st.Destroy(true);
TransitionToState(old_state);
return true;
}
void* write_ptr;
u32 write_pitch;
if (!BeginStreamUpdate(x, y, width, height, &write_ptr, &write_pitch))
return false;
CopyToUploadBuffer(data, pitch, height, write_ptr, write_pitch);
EndStreamUpdate(x, y, width, height);
return true;
}
void D3D12::Texture::CopyToUploadBuffer(const void* src_data, u32 src_pitch, u32 height, void* dst_data, u32 dst_pitch)
{
const u8* src_ptr = static_cast<const u8*>(src_data);
u8* dst_ptr = static_cast<u8*>(dst_data);
if (src_pitch == dst_pitch)
{
std::memcpy(dst_ptr, src_ptr, dst_pitch * height);
}
else
{
const u32 copy_size = std::min(src_pitch, dst_pitch);
for (u32 row = 0; row < height; row++)
{
std::memcpy(dst_ptr, src_ptr, copy_size);
src_ptr += src_pitch;
dst_ptr += dst_pitch;
}
}
}
bool D3D12::Texture::CreateSRVDescriptor(ID3D12Resource* resource, DXGI_FORMAT format, bool multisampled,
DescriptorHandle* dh)
{
if (!g_d3d12_context->GetDescriptorHeapManager().Allocate(dh))
{
Log_ErrorPrintf("Failed to allocate SRV descriptor");
return false;
}
D3D12_SHADER_RESOURCE_VIEW_DESC desc = {
format, multisampled ? D3D12_SRV_DIMENSION_TEXTURE2DMS : D3D12_SRV_DIMENSION_TEXTURE2D,
D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING};
if (!multisampled)
desc.Texture2D.MipLevels = 1;
g_d3d12_context->GetDevice()->CreateShaderResourceView(resource, &desc, dh->cpu_handle);
return true;
}
bool D3D12::Texture::CreateRTVDescriptor(ID3D12Resource* resource, DXGI_FORMAT format, bool multisampled,
DescriptorHandle* dh)
{
if (!g_d3d12_context->GetRTVHeapManager().Allocate(dh))
{
Log_ErrorPrintf("Failed to allocate SRV descriptor");
return false;
}
D3D12_RENDER_TARGET_VIEW_DESC desc = {format,
multisampled ? D3D12_RTV_DIMENSION_TEXTURE2DMS : D3D12_RTV_DIMENSION_TEXTURE2D};
g_d3d12_context->GetDevice()->CreateRenderTargetView(resource, &desc, dh->cpu_handle);
return true;
}
bool D3D12::Texture::CreateDSVDescriptor(ID3D12Resource* resource, DXGI_FORMAT format, bool multisampled,
DescriptorHandle* dh)
{
if (!g_d3d12_context->GetDSVHeapManager().Allocate(dh))
{
Log_ErrorPrintf("Failed to allocate SRV descriptor");
return false;
}
D3D12_DEPTH_STENCIL_VIEW_DESC desc = {
format, multisampled ? D3D12_DSV_DIMENSION_TEXTURE2DMS : D3D12_DSV_DIMENSION_TEXTURE2D, D3D12_DSV_FLAG_NONE};
g_d3d12_context->GetDevice()->CreateDepthStencilView(resource, &desc, dh->cpu_handle);
return true;
}

View File

@ -1,80 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../gpu_texture.h"
#include "../windows_headers.h"
#include "descriptor_heap_manager.h"
#include <d3d12.h>
#include <wrl/client.h>
namespace D3D12 {
class StreamBuffer;
class Texture final : public GPUTexture
{
public:
template<typename T>
using ComPtr = Microsoft::WRL::ComPtr<T>;
Texture();
Texture(ID3D12Resource* resource, D3D12_RESOURCE_STATES state);
Texture(Texture&& texture);
Texture(const Texture&) = delete;
~Texture();
static DXGI_FORMAT GetDXGIFormat(Format format);
static Format LookupBaseFormat(DXGI_FORMAT dformat);
ALWAYS_INLINE ID3D12Resource* GetResource() const { return m_resource.Get(); }
ALWAYS_INLINE const DescriptorHandle& GetSRVDescriptor() const { return m_srv_descriptor; }
ALWAYS_INLINE const DescriptorHandle& GetRTVOrDSVDescriptor() const { return m_rtv_or_dsv_descriptor; }
ALWAYS_INLINE D3D12_RESOURCE_STATES GetState() const { return m_state; }
ALWAYS_INLINE DXGI_FORMAT GetDXGIFormat() const { return GetDXGIFormat(m_format); }
ALWAYS_INLINE operator ID3D12Resource*() const { return m_resource.Get(); }
ALWAYS_INLINE operator bool() const { return static_cast<bool>(m_resource); }
bool IsValid() const override;
bool Create(u32 width, u32 height, u32 layers, u32 levels, u32 samples, DXGI_FORMAT format, DXGI_FORMAT srv_format,
            DXGI_FORMAT rtv_format, DXGI_FORMAT dsv_format, D3D12_RESOURCE_FLAGS flags);
bool Adopt(ComPtr<ID3D12Resource> texture, DXGI_FORMAT srv_format, DXGI_FORMAT rtv_format, DXGI_FORMAT dsv_format,
D3D12_RESOURCE_STATES state);
D3D12_RESOURCE_DESC GetDesc() const;
void Destroy(bool defer = true);
void TransitionToState(D3D12_RESOURCE_STATES state) const;
Texture& operator=(const Texture&) = delete;
Texture& operator=(Texture&& texture);
bool BeginStreamUpdate(u32 x, u32 y, u32 width, u32 height, void** out_data, u32* out_data_pitch);
void EndStreamUpdate(u32 x, u32 y, u32 width, u32 height);
bool LoadData(u32 x, u32 y, u32 width, u32 height, const void* data, u32 pitch);
static void CopyToUploadBuffer(const void* src_data, u32 src_pitch, u32 height, void* dst_data, u32 dst_pitch);
void CopyFromBuffer(u32 x, u32 y, u32 width, u32 height, u32 pitch, ID3D12Resource* buffer, u32 buffer_offset);
private:
static bool CreateSRVDescriptor(ID3D12Resource* resource, DXGI_FORMAT format, bool multisampled,
DescriptorHandle* dh);
static bool CreateRTVDescriptor(ID3D12Resource* resource, DXGI_FORMAT format, bool multisampled,
DescriptorHandle* dh);
static bool CreateDSVDescriptor(ID3D12Resource* resource, DXGI_FORMAT format, bool multisampled,
DescriptorHandle* dh);
ComPtr<ID3D12Resource> m_resource;
DescriptorHandle m_srv_descriptor = {};
DescriptorHandle m_rtv_or_dsv_descriptor = {};
mutable D3D12_RESOURCE_STATES m_state = D3D12_RESOURCE_STATE_COMMON;
bool m_is_depth_view = false;
};
} // namespace D3D12
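As a usage sketch (dimensions, formats and the pixel buffer are stand-ins, not values from the diff), creating and filling a plain shader-resource texture through this interface would look roughly like:
// Hypothetical creation of a 256x256 RGBA8 shader-resource texture (sketch only).
D3D12::Texture tex;
if (tex.Create(256, 256, 1, 1, 1, DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_UNKNOWN,
               DXGI_FORMAT_UNKNOWN, D3D12_RESOURCE_FLAG_NONE))
{
  // pixels/pitch come from the caller; LoadData() picks the stream buffer or a staging texture internally.
  tex.LoadData(0, 0, 256, 256, pixels, 256 * sizeof(u32));
}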

View File

@ -1,392 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "util.h"
#include "../assert.h"
#include "../log.h"
#include "../string.h"
#include "../string_util.h"
#include "context.h"
#include "shader_cache.h"
#include <cstdarg>
#include <limits>
Log_SetChannel(D3D12);
namespace D3D12 {
void ResourceBarrier(ID3D12GraphicsCommandList* cmdlist, ID3D12Resource* resource, D3D12_RESOURCE_STATES from_state,
D3D12_RESOURCE_STATES to_state)
{
const D3D12_RESOURCE_BARRIER barrier = {D3D12_RESOURCE_BARRIER_TYPE_TRANSITION,
D3D12_RESOURCE_BARRIER_FLAG_NONE,
{{resource, D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES, from_state, to_state}}};
cmdlist->ResourceBarrier(1, &barrier);
}
void SetViewport(ID3D12GraphicsCommandList* cmdlist, int x, int y, int width, int height, float min_depth /*= 0.0f*/,
float max_depth /*= 1.0f*/)
{
const D3D12_VIEWPORT vp{static_cast<float>(x),
static_cast<float>(y),
static_cast<float>(width),
static_cast<float>(height),
min_depth,
max_depth};
cmdlist->RSSetViewports(1, &vp);
}
void SetScissor(ID3D12GraphicsCommandList* cmdlist, int x, int y, int width, int height)
{
const D3D12_RECT r{x, y, x + width, y + height};
cmdlist->RSSetScissorRects(1, &r);
}
void SetViewportAndScissor(ID3D12GraphicsCommandList* cmdlist, int x, int y, int width, int height,
float min_depth /*= 0.0f*/, float max_depth /*= 1.0f*/)
{
SetViewport(cmdlist, x, y, width, height, min_depth, max_depth);
SetScissor(cmdlist, x, y, width, height);
}
void SetViewportAndClampScissor(ID3D12GraphicsCommandList* cmdlist, int x, int y, int width, int height,
float min_depth /*= 0.0f*/, float max_depth /*= 1.0f*/)
{
SetViewport(cmdlist, x, y, width, height, min_depth, max_depth);
const int cx = std::max(x, 0);
const int cy = std::max(y, 0);
const int cwidth = width - (cx - x);
const int cheight = height - (cy - y);
SetScissor(cmdlist, cx, cy, cwidth, cheight);
}
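// Worked example of the clamp above: for x = -10, width = 100, the scissor becomes cx = 0 and
// cwidth = 100 - (0 - (-10)) = 90, i.e. the 10 pixels that fall off-screen are trimmed from the rect.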
u32 GetTexelSize(DXGI_FORMAT format)
{
switch (format)
{
case DXGI_FORMAT_R8G8B8A8_UNORM:
case DXGI_FORMAT_R8G8B8A8_SNORM:
case DXGI_FORMAT_R8G8B8A8_TYPELESS:
case DXGI_FORMAT_B8G8R8A8_UNORM:
case DXGI_FORMAT_B8G8R8A8_TYPELESS:
return 4;
case DXGI_FORMAT_B5G5R5A1_UNORM:
case DXGI_FORMAT_B5G6R5_UNORM:
return 2;
default:
Panic("Unknown format");
return 1;
}
}
void SetDefaultSampler(D3D12_SAMPLER_DESC* desc)
{
desc->Filter = D3D12_FILTER_MIN_MAG_MIP_LINEAR;
desc->AddressU = D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
desc->AddressV = D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
desc->AddressW = D3D12_TEXTURE_ADDRESS_MODE_CLAMP;
desc->MipLODBias = 0;
desc->MaxAnisotropy = 1;
desc->ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
desc->BorderColor[0] = 1.0f;
desc->BorderColor[1] = 1.0f;
desc->BorderColor[2] = 1.0f;
desc->BorderColor[3] = 1.0f;
desc->MinLOD = -3.402823466e+38F; // -FLT_MAX
desc->MaxLOD = 3.402823466e+38F; // FLT_MAX
}
#ifdef _DEBUG
void SetObjectName(ID3D12Object* object, const char* name)
{
object->SetName(StringUtil::UTF8StringToWideString(name).c_str());
}
void SetObjectNameFormatted(ID3D12Object* object, const char* format, ...)
{
std::va_list ap;
va_start(ap, format);
SmallString str;
str.FormatVA(format, ap);
SetObjectName(object, str);
va_end(ap);
}
#endif
GraphicsPipelineBuilder::GraphicsPipelineBuilder()
{
Clear();
}
void GraphicsPipelineBuilder::Clear()
{
std::memset(&m_desc, 0, sizeof(m_desc));
std::memset(m_input_elements.data(), 0, sizeof(D3D12_INPUT_ELEMENT_DESC) * m_input_elements.size());
m_desc.NodeMask = 1;
m_desc.SampleMask = 0xFFFFFFFF;
m_desc.SampleDesc.Count = 1;
}
Microsoft::WRL::ComPtr<ID3D12PipelineState> GraphicsPipelineBuilder::Create(ID3D12Device* device, bool clear /*= true*/)
{
Microsoft::WRL::ComPtr<ID3D12PipelineState> ps;
HRESULT hr = device->CreateGraphicsPipelineState(&m_desc, IID_PPV_ARGS(ps.GetAddressOf()));
if (FAILED(hr))
{
Log_ErrorPrintf("CreateGraphicsPipelineState() failed: %08X", hr);
return {};
}
if (clear)
Clear();
return ps;
}
Microsoft::WRL::ComPtr<ID3D12PipelineState> GraphicsPipelineBuilder::Create(ID3D12Device* device, ShaderCache& cache,
bool clear /*= true*/)
{
Microsoft::WRL::ComPtr<ID3D12PipelineState> pso = cache.GetPipelineState(device, m_desc);
if (!pso)
return {};
if (clear)
Clear();
return pso;
}
void GraphicsPipelineBuilder::SetRootSignature(ID3D12RootSignature* rs)
{
m_desc.pRootSignature = rs;
}
void GraphicsPipelineBuilder::SetVertexShader(ID3DBlob* blob)
{
SetVertexShader(blob->GetBufferPointer(), static_cast<u32>(blob->GetBufferSize()));
}
void GraphicsPipelineBuilder::SetVertexShader(const void* data, u32 data_size)
{
m_desc.VS.pShaderBytecode = data;
m_desc.VS.BytecodeLength = data_size;
}
void GraphicsPipelineBuilder::SetGeometryShader(ID3DBlob* blob)
{
SetGeometryShader(blob->GetBufferPointer(), static_cast<u32>(blob->GetBufferSize()));
}
void GraphicsPipelineBuilder::SetGeometryShader(const void* data, u32 data_size)
{
m_desc.GS.pShaderBytecode = data;
m_desc.GS.BytecodeLength = data_size;
}
void GraphicsPipelineBuilder::SetPixelShader(ID3DBlob* blob)
{
SetPixelShader(blob->GetBufferPointer(), static_cast<u32>(blob->GetBufferSize()));
}
void GraphicsPipelineBuilder::SetPixelShader(const void* data, u32 data_size)
{
m_desc.PS.pShaderBytecode = data;
m_desc.PS.BytecodeLength = data_size;
}
void GraphicsPipelineBuilder::AddVertexAttribute(const char* semantic_name, u32 semantic_index, DXGI_FORMAT format,
u32 buffer, u32 offset)
{
const u32 index = m_desc.InputLayout.NumElements;
m_input_elements[index].SemanticIndex = semantic_index;
m_input_elements[index].SemanticName = semantic_name;
m_input_elements[index].Format = format;
m_input_elements[index].AlignedByteOffset = offset;
m_input_elements[index].InputSlot = buffer;
m_input_elements[index].InputSlotClass = D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA;
m_input_elements[index].InstanceDataStepRate = 0;
m_desc.InputLayout.pInputElementDescs = m_input_elements.data();
m_desc.InputLayout.NumElements++;
}
void GraphicsPipelineBuilder::SetPrimitiveTopologyType(D3D12_PRIMITIVE_TOPOLOGY_TYPE type)
{
m_desc.PrimitiveTopologyType = type;
}
void GraphicsPipelineBuilder::SetRasterizationState(D3D12_FILL_MODE polygon_mode, D3D12_CULL_MODE cull_mode,
bool front_face_ccw)
{
m_desc.RasterizerState.FillMode = polygon_mode;
m_desc.RasterizerState.CullMode = cull_mode;
m_desc.RasterizerState.FrontCounterClockwise = front_face_ccw;
}
void GraphicsPipelineBuilder::SetMultisamples(u32 multisamples)
{
m_desc.RasterizerState.MultisampleEnable = multisamples > 1;
m_desc.SampleDesc.Count = multisamples;
}
void GraphicsPipelineBuilder::SetNoCullRasterizationState()
{
SetRasterizationState(D3D12_FILL_MODE_SOLID, D3D12_CULL_MODE_NONE, false);
}
void GraphicsPipelineBuilder::SetDepthState(bool depth_test, bool depth_write, D3D12_COMPARISON_FUNC compare_op)
{
m_desc.DepthStencilState.DepthEnable = depth_test;
m_desc.DepthStencilState.DepthWriteMask = depth_write ? D3D12_DEPTH_WRITE_MASK_ALL : D3D12_DEPTH_WRITE_MASK_ZERO;
m_desc.DepthStencilState.DepthFunc = compare_op;
}
void GraphicsPipelineBuilder::SetNoDepthTestState()
{
SetDepthState(false, false, D3D12_COMPARISON_FUNC_ALWAYS);
}
void GraphicsPipelineBuilder::SetBlendState(u32 rt, bool blend_enable, D3D12_BLEND src_factor, D3D12_BLEND dst_factor,
D3D12_BLEND_OP op, D3D12_BLEND alpha_src_factor,
D3D12_BLEND alpha_dst_factor, D3D12_BLEND_OP alpha_op,
u8 write_mask /*= 0xFF*/)
{
m_desc.BlendState.RenderTarget[rt].BlendEnable = blend_enable;
m_desc.BlendState.RenderTarget[rt].SrcBlend = src_factor;
m_desc.BlendState.RenderTarget[rt].DestBlend = dst_factor;
m_desc.BlendState.RenderTarget[rt].BlendOp = op;
m_desc.BlendState.RenderTarget[rt].SrcBlendAlpha = alpha_src_factor;
m_desc.BlendState.RenderTarget[rt].DestBlendAlpha = alpha_dst_factor;
m_desc.BlendState.RenderTarget[rt].BlendOpAlpha = alpha_op;
m_desc.BlendState.RenderTarget[rt].RenderTargetWriteMask = write_mask;
if (rt > 0)
m_desc.BlendState.IndependentBlendEnable = TRUE;
}
void GraphicsPipelineBuilder::SetNoBlendingState()
{
SetBlendState(0, false, D3D12_BLEND_ONE, D3D12_BLEND_ZERO, D3D12_BLEND_OP_ADD, D3D12_BLEND_ONE, D3D12_BLEND_ZERO,
D3D12_BLEND_OP_ADD, D3D12_COLOR_WRITE_ENABLE_ALL);
m_desc.BlendState.IndependentBlendEnable = FALSE;
}
void GraphicsPipelineBuilder::ClearRenderTargets()
{
m_desc.NumRenderTargets = 0;
for (u32 i = 0; i < sizeof(m_desc.RTVFormats) / sizeof(m_desc.RTVFormats[0]); i++)
m_desc.RTVFormats[i] = DXGI_FORMAT_UNKNOWN;
}
void GraphicsPipelineBuilder::SetRenderTarget(u32 rt, DXGI_FORMAT format)
{
m_desc.RTVFormats[rt] = format;
if (rt >= m_desc.NumRenderTargets)
m_desc.NumRenderTargets = rt + 1;
}
void GraphicsPipelineBuilder::ClearDepthStencilFormat()
{
m_desc.DSVFormat = DXGI_FORMAT_UNKNOWN;
}
void GraphicsPipelineBuilder::SetDepthStencilFormat(DXGI_FORMAT format)
{
m_desc.DSVFormat = format;
}
RootSignatureBuilder::RootSignatureBuilder()
{
Clear();
}
void RootSignatureBuilder::Clear()
{
m_desc = {};
m_desc.pParameters = m_params.data();
m_params = {};
m_descriptor_ranges = {};
m_num_descriptor_ranges = 0;
}
Microsoft::WRL::ComPtr<ID3D12RootSignature> RootSignatureBuilder::Create(bool clear /*= true*/)
{
Microsoft::WRL::ComPtr<ID3D12RootSignature> rs = g_d3d12_context->CreateRootSignature(&m_desc);
if (!rs)
return {};
if (clear)
Clear();
return rs;
}
void RootSignatureBuilder::SetInputAssemblerFlag()
{
m_desc.Flags |= D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT;
}
u32 RootSignatureBuilder::Add32BitConstants(u32 shader_reg, u32 num_values, D3D12_SHADER_VISIBILITY visibility)
{
const u32 index = m_desc.NumParameters++;
m_params[index].ParameterType = D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS;
m_params[index].ShaderVisibility = visibility;
m_params[index].Constants.ShaderRegister = shader_reg;
m_params[index].Constants.RegisterSpace = 0;
m_params[index].Constants.Num32BitValues = num_values;
return index;
}
u32 RootSignatureBuilder::AddCBVParameter(u32 shader_reg, D3D12_SHADER_VISIBILITY visibility)
{
const u32 index = m_desc.NumParameters++;
m_params[index].ParameterType = D3D12_ROOT_PARAMETER_TYPE_CBV;
m_params[index].ShaderVisibility = visibility;
m_params[index].Descriptor.ShaderRegister = shader_reg;
m_params[index].Descriptor.RegisterSpace = 0;
return index;
}
u32 RootSignatureBuilder::AddSRVParameter(u32 shader_reg, D3D12_SHADER_VISIBILITY visibility)
{
const u32 index = m_desc.NumParameters++;
m_params[index].ParameterType = D3D12_ROOT_PARAMETER_TYPE_SRV;
m_params[index].ShaderVisibility = visibility;
m_params[index].Descriptor.ShaderRegister = shader_reg;
m_params[index].Descriptor.RegisterSpace = 0;
return index;
}
u32 RootSignatureBuilder::AddDescriptorTable(D3D12_DESCRIPTOR_RANGE_TYPE rt, u32 start_shader_reg, u32 num_shader_regs,
D3D12_SHADER_VISIBILITY visibility)
{
const u32 index = m_desc.NumParameters++;
const u32 dr_index = m_num_descriptor_ranges++;
m_descriptor_ranges[dr_index].RangeType = rt;
m_descriptor_ranges[dr_index].NumDescriptors = num_shader_regs;
m_descriptor_ranges[dr_index].BaseShaderRegister = start_shader_reg;
m_descriptor_ranges[dr_index].RegisterSpace = 0;
m_descriptor_ranges[dr_index].OffsetInDescriptorsFromTableStart = D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND;
m_params[index].ParameterType = D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE;
m_params[index].DescriptorTable.pDescriptorRanges = &m_descriptor_ranges[dr_index];
m_params[index].DescriptorTable.NumDescriptorRanges = 1;
m_params[index].ShaderVisibility = visibility;
return index;
}
} // namespace D3D12
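To illustrate how these builders were typically driven (the root signature, shader blobs and formats below are assumed to exist and are not taken from the diff), a pipeline setup would look roughly like:
// Hypothetical pipeline construction with the deleted GraphicsPipelineBuilder (sketch only).
D3D12::GraphicsPipelineBuilder gpb;
gpb.SetRootSignature(root_signature.Get());
gpb.SetVertexShader(vs_blob.Get());
gpb.SetPixelShader(ps_blob.Get());
gpb.AddVertexAttribute("POSITION", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 0);
gpb.SetPrimitiveTopologyType(D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE);
gpb.SetNoCullRasterizationState();
gpb.SetNoDepthTestState();
gpb.SetNoBlendingState();
gpb.SetRenderTarget(0, DXGI_FORMAT_R8G8B8A8_UNORM);
Microsoft::WRL::ComPtr<ID3D12PipelineState> pso = gpb.Create(g_d3d12_context->GetDevice());
if (!pso)
  Log_ErrorPrintf("Failed to build pipeline");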

View File

@ -1,388 +0,0 @@
#include "drm_display.h"
#include "common/assert.h"
#include "common/log.h"
#include "common/scoped_guard.h"
#include "common/string.h"
#include "file_system.h"
#include <cmath>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
Log_SetChannel(DRMDisplay);
enum
{
MAX_CARDS_TO_TRY = 10
};
DRMDisplay::DRMDisplay(int card /*= -1*/) : m_card_id(card) {}
DRMDisplay::~DRMDisplay()
{
// restore original buffer
if (m_prev_crtc)
RestoreBuffer();
if (m_connector)
drmModeFreeConnector(m_connector);
if (m_card_fd >= 0)
close(m_card_fd);
}
// https://gist.github.com/Miouyouyou/89e9fe56a2c59bce7d4a18a858f389ef
static uint32_t find_crtc_for_encoder(const drmModeRes* resources, const drmModeEncoder* encoder)
{
int i;
for (i = 0; i < resources->count_crtcs; i++)
{
/* possible_crtcs is a bitmask as described here:
* https://dvdhrm.wordpress.com/2012/09/13/linux-drm-mode-setting-api
*/
const uint32_t crtc_mask = 1 << i;
const uint32_t crtc_id = resources->crtcs[i];
if (encoder->possible_crtcs & crtc_mask)
{
return crtc_id;
}
}
/* no match found */
return 0; // DRM object IDs are never zero; callers test the result against zero
}
static uint32_t find_crtc_for_connector(int card_fd, const drmModeRes* resources, const drmModeConnector* connector)
{
int i;
for (i = 0; i < connector->count_encoders; i++)
{
const uint32_t encoder_id = connector->encoders[i];
drmModeEncoder* encoder = drmModeGetEncoder(card_fd, encoder_id);
if (encoder)
{
const uint32_t crtc_id = find_crtc_for_encoder(resources, encoder);
drmModeFreeEncoder(encoder);
if (crtc_id != 0)
{
return crtc_id;
}
}
}
/* no match found */
return 0; // as above, zero signals "not found" (checked by TryOpeningCard())
}
bool DRMDisplay::Initialize(u32 width, u32 height, float refresh_rate)
{
if (m_card_id < 0)
{
for (int i = 0; i < MAX_CARDS_TO_TRY; i++)
{
if (TryOpeningCard(i, width, height, refresh_rate))
return true;
}
return false;
}
return TryOpeningCard(m_card_id, width, height, refresh_rate);
}
void DRMDisplay::RestoreBuffer()
{
if (m_prev_crtc)
{
u32 connector_id = m_connector->connector_id;
drmModeSetCrtc(m_card_fd, m_prev_crtc->crtc_id, m_prev_crtc->buffer_id, m_prev_crtc->x, m_prev_crtc->y,
&connector_id, 1, &m_prev_crtc->mode);
drmModeFreeCrtc(m_prev_crtc);
m_prev_crtc = nullptr;
}
}
bool DRMDisplay::TryOpeningCard(int card, u32 width, u32 height, float refresh_rate)
{
if (m_card_fd >= 0)
close(m_card_fd);
m_card_fd = open(TinyString::FromFormat("/dev/dri/card%d", card), O_RDWR);
if (m_card_fd < 0)
{
Log_ErrorPrintf("open(/dev/dri/card%d) failed: %d (%s)", card, errno, strerror(errno));
return false;
}
drmModeRes* resources = drmModeGetResources(m_card_fd);
if (!resources)
{
Log_ErrorPrintf("drmModeGetResources() failed: %d (%s)", errno, strerror(errno));
return false;
}
Assert(!m_connector);
for (int i = 0; i < resources->count_connectors; i++)
{
drmModeConnector* next_connector = drmModeGetConnector(m_card_fd, resources->connectors[i]);
if (next_connector->connection == DRM_MODE_CONNECTED)
{
m_connector = next_connector;
break;
}
drmModeFreeConnector(next_connector);
}
if (!m_connector)
{
Log_ErrorPrintf("No connector found");
drmModeFreeResources(resources);
return false;
}
for (int i = 0; i < m_connector->count_modes; i++)
{
drmModeModeInfo* next_mode = &m_connector->modes[i];
const float mode_refresh_rate = (static_cast<float>(next_mode->clock) * 1000.0f) /
(static_cast<float>(next_mode->htotal) * static_cast<float>(next_mode->vtotal));
Log_DevPrintf("Checking mode %u: %ux%u @ %f hz", i, next_mode->hdisplay, next_mode->vdisplay, mode_refresh_rate);
if (width == 0 && height == 0)
{
// use preferred mode if we're auto selecting
if (next_mode->type & DRM_MODE_TYPE_PREFERRED)
{
m_mode = next_mode;
break;
}
else if (!m_mode)
{
m_mode = next_mode;
}
}
else
{
if (width == next_mode->hdisplay && height == next_mode->vdisplay &&
(refresh_rate == 0.0f || std::abs(mode_refresh_rate - refresh_rate) < 0.1f))
{
m_mode = next_mode;
break;
}
}
}
if (!m_mode)
{
Log_ErrorPrintf("No mode found");
drmModeFreeResources(resources);
return false;
}
drmModeEncoder* encoder = nullptr;
for (int i = 0; i < resources->count_encoders; i++)
{
drmModeEncoder* next_encoder = drmModeGetEncoder(m_card_fd, resources->encoders[i]);
if (next_encoder->encoder_id == m_connector->encoder_id)
{
encoder = next_encoder;
m_crtc_id = encoder->crtc_id;
break;
}
drmModeFreeEncoder(next_encoder);
}
if (encoder)
{
drmModeFreeEncoder(encoder);
}
else
{
m_crtc_id = find_crtc_for_connector(m_card_fd, resources, m_connector);
if (m_crtc_id == 0)
{
Log_ErrorPrintf("No CRTC found");
drmModeFreeResources(resources);
return false;
}
}
drmModeFreeResources(resources);
m_card_id = card;
m_prev_crtc = drmModeGetCrtc(m_card_fd, m_crtc_id);
return true;
}
std::optional<u32> DRMDisplay::AddBuffer(u32 width, u32 height, u32 format, u32 handle, u32 pitch, u32 offset)
{
uint32_t bo_handles[4] = {handle, 0, 0, 0};
uint32_t pitches[4] = {pitch, 0, 0, 0};
uint32_t offsets[4] = {offset, 0, 0, 0};
u32 fb_id;
int res = drmModeAddFB2(m_card_fd, width, height, format, bo_handles, pitches, offsets, &fb_id, 0);
if (res != 0)
{
Log_ErrorPrintf("drmModeAddFB2() failed: %d", res);
return std::nullopt;
}
return fb_id;
}
void DRMDisplay::RemoveBuffer(u32 fb_id)
{
drmModeRmFB(m_card_fd, fb_id);
}
void DRMDisplay::PresentBuffer(u32 fb_id, bool wait_for_vsync)
{
if (!wait_for_vsync)
{
u32 connector_id = m_connector->connector_id;
int res = drmModeSetCrtc(m_card_fd, m_crtc_id, fb_id, 0, 0, &connector_id, 1, m_mode);
if (res != 0)
Log_ErrorPrintf("drmModeSetCrtc() failed: %d", res);
return;
}
bool waiting_for_flip = true;
drmEventContext event_ctx = {};
event_ctx.version = DRM_EVENT_CONTEXT_VERSION;
event_ctx.page_flip_handler = [](int fd, unsigned int frame, unsigned int sec, unsigned int usec, void* data) {
*reinterpret_cast<bool*>(data) = false;
};
int res = drmModePageFlip(m_card_fd, m_crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT, &waiting_for_flip);
if (res != 0)
{
Log_ErrorPrintf("drmModePageFlip() failed: %d", res);
return;
}
while (waiting_for_flip)
{
fd_set fds;
FD_ZERO(&fds);
FD_SET(m_card_fd, &fds);
int res = select(m_card_fd + 1, &fds, nullptr, nullptr, nullptr);
if (res < 0)
{
Log_ErrorPrintf("select() failed: %d", errno);
break;
}
else if (res == 0)
{
continue;
}
drmHandleEvent(m_card_fd, &event_ctx);
}
}
bool DRMDisplay::GetCurrentMode(u32* width, u32* height, float* refresh_rate, int card, int connector)
{
int card_fd = -1;
if (card < 0)
{
for (int try_card = 0; try_card < MAX_CARDS_TO_TRY; try_card++)
{
card_fd = open(TinyString::FromFormat("/dev/dri/card%d", try_card), O_RDWR);
if (card_fd >= 0)
break;
}
}
else
{
card_fd = open(TinyString::FromFormat("/dev/dri/card%d", card), O_RDWR);
}
if (card_fd < 0)
{
Log_ErrorPrintf("open(/dev/dri/card%d) failed: %d (%s)", card, errno, strerror(errno));
return false;
}
ScopedGuard card_guard([card_fd]() { close(card_fd); });
drmModeRes* resources = drmModeGetResources(card_fd);
if (!resources)
{
Log_ErrorPrintf("drmModeGetResources() failed: %d (%s)", errno, strerror(errno));
return false;
}
ScopedGuard resources_guard([resources]() { drmModeFreeResources(resources); });
drmModeConnector* connector_ptr = nullptr;
if (connector < 0)
{
for (int i = 0; i < resources->count_connectors; i++)
{
connector_ptr = drmModeGetConnector(card_fd, resources->connectors[i]);
if (connector_ptr->connection == DRM_MODE_CONNECTED)
break;
drmModeFreeConnector(connector_ptr);
connector_ptr = nullptr; // avoid leaving a dangling pointer if no connector is connected
}
}
else if (connector < resources->count_connectors)
{
connector_ptr = drmModeGetConnector(card_fd, resources->connectors[connector]);
}
ScopedGuard connector_guard([connector_ptr]() {
if (connector_ptr)
drmModeFreeConnector(connector_ptr);
});
if (!connector_ptr || connector_ptr->connection != DRM_MODE_CONNECTED)
{
Log_ErrorPrintf("No connector found");
return false;
}
drmModeEncoder* encoder = drmModeGetEncoder(card_fd, connector_ptr->encoder_id);
if (!encoder)
{
Log_ErrorPrint("No encoder found");
return false;
}
ScopedGuard encoder_guard([encoder]() { drmModeFreeEncoder(encoder); });
drmModeCrtc* crtc = drmModeGetCrtc(card_fd, encoder->crtc_id);
if (!crtc)
{
Log_ErrorPrint("No CRTC found");
return false;
}
if (!crtc->mode_valid)
{
Log_ErrorPrint("CRTC mode not valid");
return false;
}
const u32 current_width = static_cast<u32>(crtc->mode.hdisplay);
const u32 current_height = static_cast<u32>(crtc->mode.vdisplay);
const float current_refresh_rate = (static_cast<float>(crtc->mode.clock) * 1000.0f) /
(static_cast<float>(crtc->mode.htotal) * static_cast<float>(crtc->mode.vtotal));
Log_InfoPrintf("Current mode for card %d: %ux%u@%f", card, current_width, current_height, current_refresh_rate);
if (width)
*width = current_width;
if (height)
*height = current_height;
if (refresh_rate)
*refresh_rate = current_refresh_rate;
return true;
}
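The refresh-rate expression used in both TryOpeningCard() and GetCurrentMode() is the standard DRM timing formula: pixel clock (kHz) * 1000 / (htotal * vtotal). As a worked example, a 1080p60 mode with clock = 148500, htotal = 2200 and vtotal = 1125 evaluates to 148500000 / 2475000 = 60.0 Hz.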

View File

@ -1,61 +0,0 @@
#pragma once
#include "types.h"
#include <array>
#include <optional>
#include <xf86drm.h>
#include <xf86drmMode.h>
class DRMDisplay
{
public:
DRMDisplay(int card = -1);
~DRMDisplay();
static bool GetCurrentMode(u32* width, u32* height, float* refresh_rate, int card = -1, int connector = -1);
bool Initialize(u32 width, u32 height, float refresh_rate);
/// Restores the buffer saved at startup.
void RestoreBuffer();
int GetCardID() const { return m_card_id; }
int GetCardFD() const { return m_card_fd; }
u32 GetWidth() const { return m_mode->hdisplay; }
u32 GetHeight() const { return m_mode->vdisplay; }
float GetRefreshRate() const
{
return (static_cast<float>(m_mode->clock) * 1000.0f) /
(static_cast<float>(m_mode->htotal) * static_cast<float>(m_mode->vtotal));
}
u32 GetModeCount() const { return m_connector->count_modes; }
u32 GetModeWidth(u32 i) const { return m_connector->modes[i].hdisplay; }
u32 GetModeHeight(u32 i) const { return m_connector->modes[i].vdisplay; }
float GetModeRefreshRate(u32 i) const
{
return (static_cast<float>(m_connector->modes[i].clock) * 1000.0f) /
(static_cast<float>(m_connector->modes[i].htotal) * static_cast<float>(m_connector->modes[i].vtotal));
}
std::optional<u32> AddBuffer(u32 width, u32 height, u32 format, u32 handle, u32 pitch, u32 offset);
void RemoveBuffer(u32 fb_id);
void PresentBuffer(u32 fb_id, bool wait_for_vsync);
private:
enum : u32
{
MAX_BUFFERS = 5
};
bool TryOpeningCard(int card, u32 width, u32 height, float refresh_rate);
int m_card_id = 0;
int m_card_fd = -1;
u32 m_crtc_id = 0;
drmModeRes* m_resources = nullptr;
drmModeConnector* m_connector = nullptr;
drmModeModeInfo* m_mode = nullptr;
drmModeCrtc* m_prev_crtc = nullptr;
};
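A minimal sketch of driving this class from a caller (card selection and error handling are illustrative, not taken from the diff):
// Hypothetical mode query and display bring-up with the deleted DRMDisplay class (sketch only).
u32 width = 0, height = 0;
float refresh_rate = 0.0f;
if (DRMDisplay::GetCurrentMode(&width, &height, &refresh_rate))
{
  DRMDisplay display; // card auto-detected (card = -1)
  if (display.Initialize(width, height, refresh_rate))
  {
    // ... register framebuffers with AddBuffer() and flip them with PresentBuffer() ...
  }
}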

View File

@ -1,48 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "context_egl_android.h"
#include "../log.h"
#include <android/native_window.h>
Log_SetChannel(GL::ContextEGLAndroid);
namespace GL {
ContextEGLAndroid::ContextEGLAndroid(const WindowInfo& wi) : ContextEGL(wi) {}
ContextEGLAndroid::~ContextEGLAndroid() = default;
std::unique_ptr<Context> ContextEGLAndroid::Create(const WindowInfo& wi, const Version* versions_to_try,
size_t num_versions_to_try)
{
std::unique_ptr<ContextEGLAndroid> context = std::make_unique<ContextEGLAndroid>(wi);
if (!context->Initialize(versions_to_try, num_versions_to_try))
return nullptr;
return context;
}
std::unique_ptr<Context> ContextEGLAndroid::CreateSharedContext(const WindowInfo& wi)
{
std::unique_ptr<ContextEGLAndroid> context = std::make_unique<ContextEGLAndroid>(wi);
context->m_display = m_display;
if (!context->CreateContextAndSurface(m_version, m_context, false))
return nullptr;
return context;
}
EGLNativeWindowType ContextEGLAndroid::GetNativeWindow(EGLConfig config)
{
EGLint native_visual_id = 0;
if (!eglGetConfigAttrib(m_display, m_config, EGL_NATIVE_VISUAL_ID, &native_visual_id))
{
Log_ErrorPrintf("Failed to get native visual ID");
return 0;
}
ANativeWindow_setBuffersGeometry(static_cast<ANativeWindow*>(m_wi.window_handle), 0, 0,
                                 static_cast<int32_t>(native_visual_id));
m_wi.surface_width = ANativeWindow_getWidth(static_cast<ANativeWindow*>(m_wi.window_handle));
m_wi.surface_height = ANativeWindow_getHeight(static_cast<ANativeWindow*>(m_wi.window_handle));
return static_cast<EGLNativeWindowType>(m_wi.window_handle);
}
} // namespace GL

View File

@ -1,24 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "context_egl.h"
namespace GL {
class ContextEGLAndroid final : public ContextEGL
{
public:
ContextEGLAndroid(const WindowInfo& wi);
~ContextEGLAndroid() override;
static std::unique_ptr<Context> Create(const WindowInfo& wi, const Version* versions_to_try,
size_t num_versions_to_try);
std::unique_ptr<Context> CreateSharedContext(const WindowInfo& wi) override;
protected:
EGLNativeWindowType GetNativeWindow(EGLConfig config) override;
};
} // namespace GL

View File

@ -1,35 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "context_egl_fbdev.h"
namespace GL {
ContextEGLFBDev::ContextEGLFBDev(const WindowInfo& wi) : ContextEGL(wi) {}
ContextEGLFBDev::~ContextEGLFBDev() = default;
std::unique_ptr<Context> ContextEGLFBDev::Create(const WindowInfo& wi, const Version* versions_to_try,
size_t num_versions_to_try)
{
std::unique_ptr<ContextEGLFBDev> context = std::make_unique<ContextEGLFBDev>(wi);
if (!context->Initialize(versions_to_try, num_versions_to_try))
return nullptr;
return context;
}
std::unique_ptr<Context> ContextEGLFBDev::CreateSharedContext(const WindowInfo& wi)
{
std::unique_ptr<ContextEGLFBDev> context = std::make_unique<ContextEGLFBDev>(wi);
context->m_display = m_display;
if (!context->CreateContextAndSurface(m_version, m_context, false))
return nullptr;
return context;
}
EGLNativeWindowType ContextEGLFBDev::GetNativeWindow(EGLConfig config)
{
return static_cast<EGLNativeWindowType>(0);
}
} // namespace GL

View File

@ -1,24 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "context_egl.h"
namespace GL {
class ContextEGLFBDev final : public ContextEGL
{
public:
ContextEGLFBDev(const WindowInfo& wi);
~ContextEGLFBDev() override;
static std::unique_ptr<Context> Create(const WindowInfo& wi, const Version* versions_to_try,
size_t num_versions_to_try);
std::unique_ptr<Context> CreateSharedContext(const WindowInfo& wi) override;
protected:
EGLNativeWindowType GetNativeWindow(EGLConfig config) override;
};
} // namespace GL

View File

@ -1,289 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "context_egl_gbm.h"
#include "../assert.h"
#include "../log.h"
#include <drm.h>
#include <drm_fourcc.h>
#include <gbm.h>
Log_SetChannel(GL::ContextEGLGBM);
namespace GL {
ContextEGLGBM::ContextEGLGBM(const WindowInfo& wi) : ContextEGL(wi)
{
#ifdef CONTEXT_EGL_GBM_USE_PRESENT_THREAD
StartPresentThread();
#endif
}
ContextEGLGBM::~ContextEGLGBM()
{
#ifdef CONTEXT_EGL_GBM_USE_PRESENT_THREAD
StopPresentThread();
Assert(!m_current_present_buffer);
#endif
m_drm_display.RestoreBuffer();
// We have to destroy the context before the surface/device.
// Leaving it to the base class would be too late.
DestroySurface();
DestroyContext();
while (m_num_buffers > 0)
{
Buffer& buffer = m_buffers[--m_num_buffers];
m_drm_display.RemoveBuffer(buffer.fb_id);
}
if (m_fb_surface)
gbm_surface_destroy(m_fb_surface);
if (m_gbm_device)
gbm_device_destroy(m_gbm_device);
}
std::unique_ptr<Context> ContextEGLGBM::Create(const WindowInfo& wi, const Version* versions_to_try,
size_t num_versions_to_try)
{
std::unique_ptr<ContextEGLGBM> context = std::make_unique<ContextEGLGBM>(wi);
if (!context->CreateDisplay() || !context->CreateGBMDevice() ||
!context->Initialize(versions_to_try, num_versions_to_try))
{
return nullptr;
}
return context;
}
std::unique_ptr<Context> ContextEGLGBM::CreateSharedContext(const WindowInfo& wi)
{
std::unique_ptr<ContextEGLGBM> context = std::make_unique<ContextEGLGBM>(wi);
context->m_display = m_display;
if (!context->CreateContextAndSurface(m_version, m_context, false))
return nullptr;
return context;
}
void ContextEGLGBM::ResizeSurface(u32 new_surface_width, u32 new_surface_height)
{
ContextEGL::ResizeSurface(new_surface_width, new_surface_height);
}
bool ContextEGLGBM::CreateGBMDevice()
{
Assert(!m_gbm_device);
m_gbm_device = gbm_create_device(m_drm_display.GetCardFD());
if (!m_gbm_device)
{
Log_ErrorPrintf("gbm_create_device() failed: %d", errno);
return false;
}
return true;
}
bool ContextEGLGBM::CreateDisplay()
{
if (!m_drm_display.Initialize(m_wi.surface_width, m_wi.surface_height, m_wi.surface_refresh_rate))
return false;
m_wi.surface_width = m_drm_display.GetWidth();
m_wi.surface_height = m_drm_display.GetHeight();
m_wi.surface_refresh_rate = m_drm_display.GetRefreshRate();
return true;
}
bool ContextEGLGBM::SetDisplay()
{
if (!eglGetPlatformDisplayEXT)
{
Log_ErrorPrintf("eglGetPlatformDisplayEXT() not loaded");
return false;
}
m_display = eglGetPlatformDisplayEXT(EGL_PLATFORM_GBM_KHR, m_gbm_device, nullptr);
if (!m_display)
{
Log_ErrorPrintf("eglGetPlatformDisplayEXT() failed");
return false;
}
return true;
}
EGLNativeWindowType ContextEGLGBM::GetNativeWindow(EGLConfig config)
{
EGLint visual_id;
eglGetConfigAttrib(m_display, config, EGL_NATIVE_VISUAL_ID, &visual_id);
Assert(!m_fb_surface);
m_fb_surface = gbm_surface_create(m_gbm_device, m_drm_display.GetWidth(), m_drm_display.GetHeight(),
static_cast<u32>(visual_id), GBM_BO_USE_RENDERING | GBM_BO_USE_SCANOUT);
if (!m_fb_surface)
{
Log_ErrorPrintf("gbm_surface_create() failed: %d", errno);
return {};
}
return (EGLNativeWindowType)((void*)m_fb_surface);
}
ContextEGLGBM::Buffer* ContextEGLGBM::LockFrontBuffer()
{
struct gbm_bo* bo = gbm_surface_lock_front_buffer(m_fb_surface);
Buffer* buffer = nullptr;
for (u32 i = 0; i < m_num_buffers; i++)
{
if (m_buffers[i].bo == bo)
{
buffer = &m_buffers[i];
break;
}
}
if (!buffer)
{
// haven't tracked this buffer yet
Assert(m_num_buffers < MAX_BUFFERS);
const u32 width = gbm_bo_get_width(bo);
const u32 height = gbm_bo_get_height(bo);
const u32 stride = gbm_bo_get_stride(bo);
const u32 format = gbm_bo_get_format(bo);
const u32 handle = gbm_bo_get_handle(bo).u32;
std::optional<u32> fb_id = m_drm_display.AddBuffer(width, height, format, handle, stride, 0);
if (!fb_id.has_value())
return nullptr;
buffer = &m_buffers[m_num_buffers];
buffer->bo = bo;
buffer->fb_id = fb_id.value();
m_num_buffers++;
}
return buffer;
}
void ContextEGLGBM::ReleaseBuffer(Buffer* buffer)
{
gbm_surface_release_buffer(m_fb_surface, buffer->bo);
}
void ContextEGLGBM::PresentBuffer(Buffer* buffer, bool wait_for_vsync)
{
m_drm_display.PresentBuffer(buffer->fb_id, wait_for_vsync);
}
bool ContextEGLGBM::SwapBuffers()
{
if (!ContextEGL::SwapBuffers())
return false;
#ifdef CONTEXT_EGL_GBM_USE_PRESENT_THREAD
std::unique_lock lock(m_present_mutex);
m_present_pending.store(true);
m_present_cv.notify_one();
if (m_vsync)
m_present_done_cv.wait(lock, [this]() { return !m_present_pending.load(); });
#else
Buffer* front_buffer = LockFrontBuffer();
if (!front_buffer)
return false;
PresentSurface(front_buffer, m_vsync && m_last_front_buffer);
if (m_last_front_buffer)
ReleaseBuffer(m_last_front_buffer);
m_last_front_buffer = front_buffer;
#endif
return true;
}
bool ContextEGLGBM::SetSwapInterval(s32 interval)
{
if (interval < 0 || interval > 1)
return false;
std::unique_lock lock(m_present_mutex);
m_vsync = (interval > 0);
return true;
}
std::vector<Context::FullscreenModeInfo> ContextEGLGBM::EnumerateFullscreenModes()
{
std::vector<Context::FullscreenModeInfo> modes;
modes.reserve(m_drm_display.GetModeCount());
for (u32 i = 0; i < m_drm_display.GetModeCount(); i++)
{
modes.push_back(FullscreenModeInfo{m_drm_display.GetModeWidth(i), m_drm_display.GetModeHeight(i),
m_drm_display.GetModeRefreshRate(i)});
}
return modes;
}
#ifdef CONTEXT_EGL_GBM_USE_PRESENT_THREAD
void ContextEGLGBM::StartPresentThread()
{
m_present_thread_shutdown.store(false);
m_present_thread = std::thread(&ContextEGLGBM::PresentThread, this);
}
void ContextEGLGBM::StopPresentThread()
{
if (!m_present_thread.joinable())
return;
{
std::unique_lock lock(m_present_mutex);
m_present_thread_shutdown.store(true);
m_present_cv.notify_one();
}
m_present_thread.join();
}
void ContextEGLGBM::PresentThread()
{
std::unique_lock lock(m_present_mutex);
while (!m_present_thread_shutdown.load())
{
m_present_cv.wait(lock);
if (!m_present_pending.load())
continue;
Buffer* next_buffer = LockFrontBuffer();
const bool wait_for_vsync = m_vsync && m_current_present_buffer;
lock.unlock();
PresentBuffer(next_buffer, wait_for_vsync);
lock.lock();
if (m_current_present_buffer)
ReleaseBuffer(m_current_present_buffer);
m_current_present_buffer = next_buffer;
m_present_pending.store(false);
m_present_done_cv.notify_one();
}
if (m_current_present_buffer)
{
ReleaseBuffer(m_current_present_buffer);
m_current_present_buffer = nullptr;
}
}
#endif
} // namespace GL

View File

@ -1,81 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../drm_display.h"
#include "context_egl.h"
#include <atomic>
#include <condition_variable>
#include <gbm.h>
#include <mutex>
#include <thread>
#define CONTEXT_EGL_GBM_USE_PRESENT_THREAD 1
namespace GL {
class ContextEGLGBM final : public ContextEGL
{
public:
ContextEGLGBM(const WindowInfo& wi);
~ContextEGLGBM() override;
static std::unique_ptr<Context> Create(const WindowInfo& wi, const Version* versions_to_try,
size_t num_versions_to_try);
std::unique_ptr<Context> CreateSharedContext(const WindowInfo& wi) override;
void ResizeSurface(u32 new_surface_width = 0, u32 new_surface_height = 0) override;
bool SwapBuffers() override;
bool SetSwapInterval(s32 interval) override;
std::vector<FullscreenModeInfo> EnumerateFullscreenModes() override;
protected:
bool SetDisplay() override;
EGLNativeWindowType GetNativeWindow(EGLConfig config) override;
private:
enum : u32
{
MAX_BUFFERS = 5
};
struct Buffer
{
struct gbm_bo* bo;
u32 fb_id;
};
bool CreateDisplay();
bool CreateGBMDevice();
Buffer* LockFrontBuffer();
void ReleaseBuffer(Buffer* buffer);
void PresentBuffer(Buffer* buffer, bool wait_for_vsync);
void StartPresentThread();
void StopPresentThread();
void PresentThread();
DRMDisplay m_drm_display;
struct gbm_device* m_gbm_device = nullptr;
struct gbm_surface* m_fb_surface = nullptr;
bool m_vsync = true;
#ifdef CONTEXT_EGL_GBM_USE_PRESENT_THREAD
std::thread m_present_thread;
std::mutex m_present_mutex;
std::condition_variable m_present_cv;
std::atomic_bool m_present_pending{false};
std::atomic_bool m_present_thread_shutdown{false};
std::condition_variable m_present_done_cv;
Buffer* m_current_present_buffer = nullptr;
#endif
u32 m_num_buffers = 0;
std::array<Buffer, MAX_BUFFERS> m_buffers{};
};
} // namespace GL

View File

@ -1,332 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "context_glx.h"
#include "../assert.h"
#include "../log.h"
#include <dlfcn.h>
Log_SetChannel(GL::ContextGLX);
namespace GL {
ContextGLX::ContextGLX(const WindowInfo& wi) : Context(wi) {}
ContextGLX::~ContextGLX()
{
if (glXGetCurrentContext() == m_context)
glXMakeContextCurrent(GetDisplay(), None, None, None);
if (m_context)
glXDestroyContext(GetDisplay(), m_context);
if (m_vi)
XFree(m_vi);
if (m_libGL_handle)
dlclose(m_libGL_handle);
}
std::unique_ptr<Context> ContextGLX::Create(const WindowInfo& wi, const Version* versions_to_try,
size_t num_versions_to_try)
{
std::unique_ptr<ContextGLX> context = std::make_unique<ContextGLX>(wi);
if (!context->Initialize(versions_to_try, num_versions_to_try))
return nullptr;
return context;
}
bool ContextGLX::Initialize(const Version* versions_to_try, size_t num_versions_to_try)
{
// We need libGL loaded, because GLAD loads its own, then releases it.
m_libGL_handle = dlopen("libGL.so.1", RTLD_NOW | RTLD_GLOBAL);
if (!m_libGL_handle)
{
m_libGL_handle = dlopen("libGL.so", RTLD_NOW | RTLD_GLOBAL);
if (!m_libGL_handle)
{
Log_ErrorPrintf("Failed to load libGL.so: %s", dlerror());
return false;
}
}
const int screen = DefaultScreen(GetDisplay());
if (!gladLoadGLX(GetDisplay(), screen))
{
Log_ErrorPrintf("Loading GLAD GLX functions failed");
return false;
}
if (m_wi.type == WindowInfo::Type::X11)
{
if (!CreateWindow(screen))
return false;
}
for (size_t i = 0; i < num_versions_to_try; i++)
{
const Version& cv = versions_to_try[i];
if (cv.profile == Profile::NoProfile && CreateAnyContext(nullptr, true))
{
m_version = cv;
return true;
}
else if (cv.profile != Profile::NoProfile && CreateVersionContext(cv, nullptr, true))
{
m_version = cv;
return true;
}
}
return false;
}
void* ContextGLX::GetProcAddress(const char* name)
{
return reinterpret_cast<void*>(glXGetProcAddress(reinterpret_cast<const GLubyte*>(name)));
}
bool ContextGLX::ChangeSurface(const WindowInfo& new_wi)
{
const bool was_current = (glXGetCurrentContext() == m_context);
if (was_current)
glXMakeContextCurrent(GetDisplay(), None, None, None);
m_window.Destroy();
m_wi = new_wi;
if (new_wi.type == WindowInfo::Type::X11)
{
const int screen = DefaultScreen(GetDisplay());
if (!CreateWindow(screen))
return false;
}
if (was_current && !glXMakeContextCurrent(GetDisplay(), GetDrawable(), GetDrawable(), m_context))
{
Log_ErrorPrintf("Failed to make context current again after surface change");
return false;
}
return true;
}
void ContextGLX::ResizeSurface(u32 new_surface_width /*= 0*/, u32 new_surface_height /*= 0*/)
{
m_window.Resize(new_surface_width, new_surface_height);
m_wi.surface_width = m_window.GetWidth();
m_wi.surface_height = m_window.GetHeight();
}
bool ContextGLX::SwapBuffers()
{
glXSwapBuffers(GetDisplay(), GetDrawable());
return true;
}
bool ContextGLX::IsCurrent()
{
return (m_context && glXGetCurrentContext() == m_context);
}
bool ContextGLX::MakeCurrent()
{
return (glXMakeContextCurrent(GetDisplay(), GetDrawable(), GetDrawable(), m_context) == True);
}
bool ContextGLX::DoneCurrent()
{
return (glXMakeContextCurrent(GetDisplay(), None, None, None) == True);
}
bool ContextGLX::SetSwapInterval(s32 interval)
{
if (GLAD_GLX_EXT_swap_control)
{
glXSwapIntervalEXT(GetDisplay(), GetDrawable(), interval);
return true;
}
else if (GLAD_GLX_MESA_swap_control)
{
return (glXSwapIntervalMESA(static_cast<u32>(std::max(interval, 0))) == 0); // MESA variant returns zero on success
}
else if (GLAD_GLX_SGI_swap_control)
{
return (glXSwapIntervalSGI(interval) == 0); // SGI variant also returns zero on success
}
else
{
return false;
}
}
std::unique_ptr<Context> ContextGLX::CreateSharedContext(const WindowInfo& wi)
{
std::unique_ptr<ContextGLX> context = std::make_unique<ContextGLX>(wi);
if (wi.type == WindowInfo::Type::X11)
{
const int screen = DefaultScreen(context->GetDisplay());
if (!context->CreateWindow(screen))
return nullptr;
}
else
{
Panic("Create pbuffer");
}
if (m_version.profile == Profile::NoProfile)
{
if (!context->CreateAnyContext(m_context, false))
return nullptr;
}
else
{
if (!context->CreateVersionContext(m_version, m_context, false))
return nullptr;
}
context->m_version = m_version;
return context;
}
bool ContextGLX::CreateWindow(int screen)
{
int attribs[32] = {GLX_X_RENDERABLE, True, GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT,
GLX_X_VISUAL_TYPE, GLX_TRUE_COLOR, GLX_DOUBLEBUFFER, True};
int nattribs = 8;
switch (m_wi.surface_format)
{
case WindowInfo::SurfaceFormat::RGB8:
attribs[nattribs++] = GLX_RED_SIZE;
attribs[nattribs++] = 8;
attribs[nattribs++] = GLX_GREEN_SIZE;
attribs[nattribs++] = 8;
attribs[nattribs++] = GLX_BLUE_SIZE;
attribs[nattribs++] = 8;
break;
case WindowInfo::SurfaceFormat::RGBA8:
attribs[nattribs++] = GLX_RED_SIZE;
attribs[nattribs++] = 8;
attribs[nattribs++] = GLX_GREEN_SIZE;
attribs[nattribs++] = 8;
attribs[nattribs++] = GLX_BLUE_SIZE;
attribs[nattribs++] = 8;
attribs[nattribs++] = GLX_ALPHA_SIZE;
attribs[nattribs++] = 8;
break;
case WindowInfo::SurfaceFormat::RGB565:
attribs[nattribs++] = GLX_RED_SIZE;
attribs[nattribs++] = 5;
attribs[nattribs++] = GLX_GREEN_SIZE;
attribs[nattribs++] = 6;
attribs[nattribs++] = GLX_BLUE_SIZE;
attribs[nattribs++] = 5;
break;
case WindowInfo::SurfaceFormat::Auto:
break;
default:
UnreachableCode();
break;
}
attribs[nattribs++] = None;
attribs[nattribs++] = 0;
int fbcount = 0;
GLXFBConfig* fbc = glXChooseFBConfig(GetDisplay(), screen, attribs, &fbcount);
if (!fbc || !fbcount)
{
Log_ErrorPrintf("glXChooseFBConfig() failed");
return false;
}
m_fb_config = *fbc;
XFree(fbc);
if (!GLAD_GLX_VERSION_1_3)
{
Log_ErrorPrintf("GLX Version 1.3 is required");
return false;
}
m_vi = glXGetVisualFromFBConfig(GetDisplay(), m_fb_config);
if (!m_vi)
{
Log_ErrorPrintf("glXGetVisualFromFBConfig() failed");
return false;
}
return m_window.Create(GetDisplay(), static_cast<Window>(reinterpret_cast<uintptr_t>(m_wi.window_handle)), m_vi);
}
bool ContextGLX::CreateAnyContext(GLXContext share_context, bool make_current)
{
X11InhibitErrors ie;
m_context = glXCreateContext(GetDisplay(), m_vi, share_context, True);
if (!m_context || ie.HadError())
{
Log_ErrorPrintf("glxCreateContext() failed");
return false;
}
if (make_current)
{
if (!glXMakeCurrent(GetDisplay(), GetDrawable(), m_context))
{
Log_ErrorPrintf("glXMakeCurrent() failed");
return false;
}
}
return true;
}
bool ContextGLX::CreateVersionContext(const Version& version, GLXContext share_context, bool make_current)
{
// Versioned/profile contexts require the create-context-attribs path, which needs GLX 1.3+.
if (!GLAD_GLX_VERSION_1_3)
{
Log_ErrorPrint("Missing GLX version 1.3.");
return false;
}
int attribs[32];
int nattribs = 0;
attribs[nattribs++] = GLX_CONTEXT_PROFILE_MASK_ARB;
attribs[nattribs++] =
((version.profile == Profile::ES) ?
((version.major_version >= 2) ? GLX_CONTEXT_ES2_PROFILE_BIT_EXT : GLX_CONTEXT_ES_PROFILE_BIT_EXT) :
GLX_CONTEXT_CORE_PROFILE_BIT_ARB);
attribs[nattribs++] = GLX_CONTEXT_MAJOR_VERSION_ARB;
attribs[nattribs++] = version.major_version;
attribs[nattribs++] = GLX_CONTEXT_MINOR_VERSION_ARB;
attribs[nattribs++] = version.minor_version;
attribs[nattribs++] = None;
attribs[nattribs++] = 0;
X11InhibitErrors ie;
m_context = glXCreateContextAttribsARB(GetDisplay(), m_fb_config, share_context, True, attribs);
XSync(GetDisplay(), False);
if (ie.HadError())
m_context = nullptr;
if (!m_context)
return false;
if (make_current)
{
if (!glXMakeContextCurrent(GetDisplay(), GetDrawable(), GetDrawable(), m_context))
{
Log_ErrorPrint("glXMakeContextCurrent() failed");
glXDestroyContext(GetDisplay(), m_context);
m_context = nullptr;
return false;
}
}
return true;
}
} // namespace GL

View File

@ -1,48 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "context.h"
#include "glad_glx.h"
#include "x11_window.h"
namespace GL {
class ContextGLX final : public Context
{
public:
ContextGLX(const WindowInfo& wi);
~ContextGLX() override;
static std::unique_ptr<Context> Create(const WindowInfo& wi, const Version* versions_to_try,
size_t num_versions_to_try);
void* GetProcAddress(const char* name) override;
bool ChangeSurface(const WindowInfo& new_wi) override;
void ResizeSurface(u32 new_surface_width = 0, u32 new_surface_height = 0) override;
bool SwapBuffers() override;
bool IsCurrent() override;
bool MakeCurrent() override;
bool DoneCurrent() override;
bool SetSwapInterval(s32 interval) override;
std::unique_ptr<Context> CreateSharedContext(const WindowInfo& wi) override;
private:
ALWAYS_INLINE Display* GetDisplay() const { return static_cast<Display*>(m_wi.display_connection); }
ALWAYS_INLINE GLXDrawable GetDrawable() const { return static_cast<GLXDrawable>(m_window.GetWindow()); }
bool Initialize(const Version* versions_to_try, size_t num_versions_to_try);
bool CreateWindow(int screen);
bool CreateAnyContext(GLXContext share_context, bool make_current);
bool CreateVersionContext(const Version& version, GLXContext share_context, bool make_current);
GLXContext m_context = nullptr;
GLXFBConfig m_fb_config = {};
XVisualInfo* m_vi = nullptr;
X11Window m_window;
// GLAD releases its reference to libGL.so, so we need to maintain our own.
void* m_libGL_handle = nullptr;
};
} // namespace GL
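A minimal caller-side sketch of how the GLX context class above is typically driven: try a list of versions in order, then spawn a share context for a worker thread. The Version/Profile aggregate layout and the Profile::Core enumerator come from context.h, which is not shown in this diff, so treat them as assumptions; the rest uses only the signatures declared above.

#include "context_glx.h"
#include <iterator>
#include <memory>

std::unique_ptr<GL::Context> CreateContexts(const WindowInfo& wi, std::unique_ptr<GL::Context>* worker_ctx)
{
  // Preference order: 4.5 core, 3.3 core, then whatever the driver hands back.
  static constexpr GL::Context::Version versions[] = {
    {GL::Context::Profile::Core, 4, 5},
    {GL::Context::Profile::Core, 3, 3},
    {GL::Context::Profile::NoProfile, 0, 0},
  };

  std::unique_ptr<GL::Context> ctx = GL::ContextGLX::Create(wi, versions, std::size(versions));
  if (!ctx)
    return nullptr;

  // The share context inherits the parent's GLXFBConfig and negotiated version (see CreateSharedContext()).
  *worker_ctx = ctx->CreateSharedContext(wi);
  return ctx;
}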

View File

@ -1,622 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "program.h"
#include "../assert.h"
#include "../log.h"
#include "../string_util.h"
#include <array>
#include <fstream>
Log_SetChannel(GL);
namespace GL {
GLuint Program::s_last_program_id = 0;
static GLuint s_next_bad_shader_id = 1;
Program::Program() = default;
Program::Program(Program&& prog)
{
m_program_id = prog.m_program_id;
prog.m_program_id = 0;
m_vertex_shader_id = prog.m_vertex_shader_id;
prog.m_vertex_shader_id = 0;
m_fragment_shader_id = prog.m_fragment_shader_id;
prog.m_fragment_shader_id = 0;
m_uniform_locations = std::move(prog.m_uniform_locations);
}
Program::~Program()
{
Destroy();
}
GLuint Program::CompileShader(GLenum type, const std::string_view source)
{
GLuint id = glCreateShader(type);
std::array<const GLchar*, 1> sources = {{source.data()}};
std::array<GLint, 1> source_lengths = {{static_cast<GLint>(source.size())}};
glShaderSource(id, static_cast<GLsizei>(sources.size()), sources.data(), source_lengths.data());
glCompileShader(id);
GLint status = GL_FALSE;
glGetShaderiv(id, GL_COMPILE_STATUS, &status);
GLint info_log_length = 0;
glGetShaderiv(id, GL_INFO_LOG_LENGTH, &info_log_length);
if (status == GL_FALSE || info_log_length > 0)
{
std::string info_log;
info_log.resize(info_log_length + 1);
glGetShaderInfoLog(id, info_log_length, &info_log_length, &info_log[0]);
if (status == GL_TRUE)
{
Log_ErrorPrintf("Shader compiled with warnings:\n%s", info_log.c_str());
}
else
{
Log_ErrorPrintf("Shader failed to compile:\n%s", info_log.c_str());
std::ofstream ofs(StringUtil::StdStringFromFormat("bad_shader_%u.txt", s_next_bad_shader_id++).c_str(),
std::ofstream::out | std::ofstream::binary);
if (ofs.is_open())
{
ofs.write(sources[0], source_lengths[0]);
ofs << "\n\nCompile failed, info log:\n";
ofs << info_log;
ofs.close();
}
glDeleteShader(id);
return 0;
}
}
return id;
}
void Program::ResetLastProgram()
{
s_last_program_id = 0;
}
bool Program::Compile(const std::string_view vertex_shader, const std::string_view fragment_shader)
{
if (m_vertex_shader_id != 0)
{
glDeleteShader(m_vertex_shader_id);
m_vertex_shader_id = 0;
}
if (m_fragment_shader_id != 0)
{
glDeleteShader(m_fragment_shader_id);
m_fragment_shader_id = 0;
}
if (!vertex_shader.empty())
{
m_vertex_shader_id = CompileShader(GL_VERTEX_SHADER, vertex_shader);
if (m_vertex_shader_id == 0)
return false;
}
if (!fragment_shader.empty())
{
m_fragment_shader_id = CompileShader(GL_FRAGMENT_SHADER, fragment_shader);
if (m_fragment_shader_id == 0)
{
// Clean up the vertex shader compiled above; there is no fragment shader to delete.
glDeleteShader(m_vertex_shader_id);
m_vertex_shader_id = 0;
return false;
}
}
m_program_id = glCreateProgram();
if (m_vertex_shader_id != 0)
glAttachShader(m_program_id, m_vertex_shader_id);
if (m_fragment_shader_id != 0)
glAttachShader(m_program_id, m_fragment_shader_id);
return true;
}
bool Program::CreateFromBinary(const void* data, u32 data_length, u32 data_format)
{
GLuint prog = glCreateProgram();
glProgramBinary(prog, static_cast<GLenum>(data_format), data, data_length);
GLint link_status;
glGetProgramiv(prog, GL_LINK_STATUS, &link_status);
if (link_status != GL_TRUE)
{
Log_ErrorPrintf("Failed to create GL program from binary: status %d", link_status);
glDeleteProgram(prog);
return false;
}
m_program_id = prog;
return true;
}
bool Program::GetBinary(std::vector<u8>* out_data, u32* out_data_format)
{
GLint binary_size = 0;
glGetProgramiv(m_program_id, GL_PROGRAM_BINARY_LENGTH, &binary_size);
if (binary_size == 0)
{
Log_WarningPrint("glGetProgramiv(GL_PROGRAM_BINARY_LENGTH) returned 0");
return false;
}
GLenum format = 0;
out_data->resize(static_cast<size_t>(binary_size));
glGetProgramBinary(m_program_id, binary_size, &binary_size, &format, out_data->data());
if (binary_size == 0)
{
Log_WarningPrint("glGetProgramBinary() failed");
return false;
}
else if (static_cast<size_t>(binary_size) != out_data->size())
{
Log_WarningPrintf("Size changed from %zu to %d after glGetProgramBinary()", out_data->size(), binary_size);
out_data->resize(static_cast<size_t>(binary_size));
}
*out_data_format = static_cast<u32>(format);
Log_DevPrintf("Program binary retrieved, %zu bytes, format %u", out_data->size(), *out_data_format);
return true;
}
void Program::SetBinaryRetrievableHint()
{
glProgramParameteri(m_program_id, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE);
}
void Program::BindAttribute(GLuint index, const char* name)
{
glBindAttribLocation(m_program_id, index, name);
}
void Program::BindDefaultAttributes()
{
BindAttribute(0, "a_position");
BindAttribute(1, "a_texcoord");
BindAttribute(2, "a_color");
}
void Program::BindFragData(GLuint index /*= 0*/, const char* name /*= "o_col0"*/)
{
glBindFragDataLocation(m_program_id, index, name);
}
void Program::BindFragDataIndexed(GLuint color_number /*= 0*/, const char* name /*= "o_col0"*/)
{
if (GLAD_GL_VERSION_3_3 || GLAD_GL_ARB_blend_func_extended)
{
glBindFragDataLocationIndexed(m_program_id, color_number, 0, name);
return;
}
else if (GLAD_GL_EXT_blend_func_extended)
{
glBindFragDataLocationIndexedEXT(m_program_id, color_number, 0, name);
return;
}
Log_ErrorPrintf("BindFragDataIndexed() called without ARB or EXT extension, we'll probably crash.");
glBindFragDataLocationIndexed(m_program_id, color_number, 0, name);
}
bool Program::Link()
{
glLinkProgram(m_program_id);
if (m_vertex_shader_id != 0)
glDeleteShader(m_vertex_shader_id);
m_vertex_shader_id = 0;
if (m_fragment_shader_id != 0)
glDeleteShader(m_fragment_shader_id);
m_fragment_shader_id = 0;
GLint status = GL_FALSE;
glGetProgramiv(m_program_id, GL_LINK_STATUS, &status);
GLint info_log_length = 0;
glGetProgramiv(m_program_id, GL_INFO_LOG_LENGTH, &info_log_length);
if (status == GL_FALSE || info_log_length > 0)
{
std::string info_log;
info_log.resize(info_log_length + 1);
glGetProgramInfoLog(m_program_id, info_log_length, &info_log_length, &info_log[0]);
if (status == GL_TRUE)
{
Log_ErrorPrintf("Program linked with warnings:\n%s", info_log.c_str());
}
else
{
Log_ErrorPrintf("Program failed to link:\n%s", info_log.c_str());
glDeleteProgram(m_program_id);
m_program_id = 0;
return false;
}
}
return true;
}
void Program::Bind() const
{
if (s_last_program_id == m_program_id)
return;
glUseProgram(m_program_id);
s_last_program_id = m_program_id;
}
void Program::Destroy()
{
if (m_vertex_shader_id != 0)
{
glDeleteShader(m_vertex_shader_id);
m_vertex_shader_id = 0;
}
if (m_fragment_shader_id != 0)
{
glDeleteShader(m_fragment_shader_id);
m_fragment_shader_id = 0;
}
if (m_program_id != 0)
{
glDeleteProgram(m_program_id);
m_program_id = 0;
}
m_uniform_locations.clear();
}
int Program::RegisterUniform(const char* name)
{
int id = static_cast<int>(m_uniform_locations.size());
m_uniform_locations.push_back(glGetUniformLocation(m_program_id, name));
return id;
}
void Program::Uniform1ui(int index, u32 x) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform1ui(location, x);
}
void Program::Uniform2ui(int index, u32 x, u32 y) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform2ui(location, x, y);
}
void Program::Uniform3ui(int index, u32 x, u32 y, u32 z) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform3ui(location, x, y, z);
}
void Program::Uniform4ui(int index, u32 x, u32 y, u32 z, u32 w) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform4ui(location, x, y, z, w);
}
void Program::Uniform1i(int index, s32 x) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform1i(location, x);
}
void Program::Uniform2i(int index, s32 x, s32 y) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform2i(location, x, y);
}
void Program::Uniform3i(int index, s32 x, s32 y, s32 z) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform3i(location, x, y, z);
}
void Program::Uniform4i(int index, s32 x, s32 y, s32 z, s32 w) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform4i(location, x, y, z, w);
}
void Program::Uniform1f(int index, float x) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform1f(location, x);
}
void Program::Uniform2f(int index, float x, float y) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform2f(location, x, y);
}
void Program::Uniform3f(int index, float x, float y, float z) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform3f(location, x, y, z);
}
void Program::Uniform4f(int index, float x, float y, float z, float w) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform4f(location, x, y, z, w);
}
void Program::Uniform2uiv(int index, const u32* v) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform2uiv(location, 1, v);
}
void Program::Uniform3uiv(int index, const u32* v) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform3uiv(location, 1, v);
}
void Program::Uniform4uiv(int index, const u32* v) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform4uiv(location, 1, v);
}
void Program::Uniform2iv(int index, const s32* v) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform2iv(location, 1, v);
}
void Program::Uniform3iv(int index, const s32* v) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform3iv(location, 1, v);
}
void Program::Uniform4iv(int index, const s32* v) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform4iv(location, 1, v);
}
void Program::Uniform2fv(int index, const float* v) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform2fv(location, 1, v);
}
void Program::Uniform3fv(int index, const float* v) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform3fv(location, 1, v);
}
void Program::Uniform4fv(int index, const float* v) const
{
Assert(static_cast<size_t>(index) < m_uniform_locations.size());
const GLint location = m_uniform_locations[index];
if (location >= 0)
glUniform4fv(location, 1, v);
}
void Program::Uniform1ui(const char* name, u32 x) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform1ui(location, x);
}
void Program::Uniform2ui(const char* name, u32 x, u32 y) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform2ui(location, x, y);
}
void Program::Uniform3ui(const char* name, u32 x, u32 y, u32 z) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform3ui(location, x, y, z);
}
void Program::Uniform4ui(const char* name, u32 x, u32 y, u32 z, u32 w) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform4ui(location, x, y, z, w);
}
void Program::Uniform1i(const char* name, s32 x) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform1i(location, x);
}
void Program::Uniform2i(const char* name, s32 x, s32 y) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform2i(location, x, y);
}
void Program::Uniform3i(const char* name, s32 x, s32 y, s32 z) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform3i(location, x, y, z);
}
void Program::Uniform4i(const char* name, s32 x, s32 y, s32 z, s32 w) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform4i(location, x, y, z, w);
}
void Program::Uniform1f(const char* name, float x) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform1f(location, x);
}
void Program::Uniform2f(const char* name, float x, float y) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform2f(location, x, y);
}
void Program::Uniform3f(const char* name, float x, float y, float z) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform3f(location, x, y, z);
}
void Program::Uniform4f(const char* name, float x, float y, float z, float w) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform4f(location, x, y, z, w);
}
void Program::Uniform2uiv(const char* name, const u32* v) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform2uiv(location, 1, v);
}
void Program::Uniform3uiv(const char* name, const u32* v) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform3uiv(location, 1, v);
}
void Program::Uniform4uiv(const char* name, const u32* v) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform4uiv(location, 1, v);
}
void Program::Uniform2iv(const char* name, const s32* v) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform2iv(location, 1, v);
}
void Program::Uniform3iv(const char* name, const s32* v) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform3iv(location, 1, v);
}
void Program::Uniform4iv(const char* name, const s32* v) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform4iv(location, 1, v);
}
void Program::Uniform2fv(const char* name, const float* v) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform2fv(location, 1, v);
}
void Program::Uniform3fv(const char* name, const float* v) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform3fv(location, 1, v);
}
void Program::Uniform4fv(const char* name, const float* v) const
{
const GLint location = glGetUniformLocation(m_program_id, name);
if (location >= 0)
glUniform4fv(location, 1, v);
}
void Program::BindUniformBlock(const char* name, u32 index)
{
const GLint location = glGetUniformBlockIndex(m_program_id, name);
if (location >= 0)
glUniformBlockBinding(m_program_id, location, index);
}
Program& Program::operator=(Program&& prog)
{
Destroy();
m_program_id = prog.m_program_id;
prog.m_program_id = 0;
m_vertex_shader_id = prog.m_vertex_shader_id;
prog.m_vertex_shader_id = 0;
m_fragment_shader_id = prog.m_fragment_shader_id;
prog.m_fragment_shader_id = 0;
m_uniform_locations = std::move(prog.m_uniform_locations);
return *this;
}
} // namespace GL
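For reference, a minimal sketch of the compile/link flow this class expects, using only member functions defined in this file; the shader sources and the "u_src_rect" uniform name are placeholders.

#include "program.h"

bool BuildProgram(GL::Program& prog, std::string_view vs_source, std::string_view fs_source)
{
  if (!prog.Compile(vs_source, fs_source))
    return false;

  // Attribute and frag-data bindings must happen after Compile() but before Link().
  prog.BindDefaultAttributes();
  prog.BindFragData(0, "o_col0");
  if (!prog.Link())
    return false;

  prog.Bind();
  prog.Uniform4f("u_src_rect", 0.0f, 0.0f, 1.0f, 1.0f); // placeholder uniform name
  return true;
}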

View File

@ -1,104 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../types.h"
#include "loader.h"
#include <string_view>
#include <vector>
namespace GL {
class Program
{
public:
Program();
Program(const Program&) = delete;
Program(Program&& prog);
~Program();
static GLuint CompileShader(GLenum type, const std::string_view source);
static void ResetLastProgram();
bool IsValid() const { return m_program_id != 0; }
bool IsBound() const { return s_last_program_id == m_program_id; }
bool Compile(const std::string_view vertex_shader, const std::string_view fragment_shader);
bool CreateFromBinary(const void* data, u32 data_length, u32 data_format);
bool GetBinary(std::vector<u8>* out_data, u32* out_data_format);
void SetBinaryRetrievableHint();
void BindAttribute(GLuint index, const char* name);
void BindDefaultAttributes();
void BindFragData(GLuint index = 0, const char* name = "o_col0");
void BindFragDataIndexed(GLuint color_number = 0, const char* name = "o_col0");
bool Link();
void Bind() const;
void Destroy();
int RegisterUniform(const char* name);
void Uniform1ui(int index, u32 x) const;
void Uniform2ui(int index, u32 x, u32 y) const;
void Uniform3ui(int index, u32 x, u32 y, u32 z) const;
void Uniform4ui(int index, u32 x, u32 y, u32 z, u32 w) const;
void Uniform1i(int index, s32 x) const;
void Uniform2i(int index, s32 x, s32 y) const;
void Uniform3i(int index, s32 x, s32 y, s32 z) const;
void Uniform4i(int index, s32 x, s32 y, s32 z, s32 w) const;
void Uniform1f(int index, float x) const;
void Uniform2f(int index, float x, float y) const;
void Uniform3f(int index, float x, float y, float z) const;
void Uniform4f(int index, float x, float y, float z, float w) const;
void Uniform2uiv(int index, const u32* v) const;
void Uniform3uiv(int index, const u32* v) const;
void Uniform4uiv(int index, const u32* v) const;
void Uniform2iv(int index, const s32* v) const;
void Uniform3iv(int index, const s32* v) const;
void Uniform4iv(int index, const s32* v) const;
void Uniform2fv(int index, const float* v) const;
void Uniform3fv(int index, const float* v) const;
void Uniform4fv(int index, const float* v) const;
void Uniform1ui(const char* name, u32 x) const;
void Uniform2ui(const char* name, u32 x, u32 y) const;
void Uniform3ui(const char* name, u32 x, u32 y, u32 z) const;
void Uniform4ui(const char* name, u32 x, u32 y, u32 z, u32 w) const;
void Uniform1i(const char* name, s32 x) const;
void Uniform2i(const char* name, s32 x, s32 y) const;
void Uniform3i(const char* name, s32 x, s32 y, s32 z) const;
void Uniform4i(const char* name, s32 x, s32 y, s32 z, s32 w) const;
void Uniform1f(const char* name, float x) const;
void Uniform2f(const char* name, float x, float y) const;
void Uniform3f(const char* name, float x, float y, float z) const;
void Uniform4f(const char* name, float x, float y, float z, float w) const;
void Uniform2uiv(const char* name, const u32* v) const;
void Uniform3uiv(const char* name, const u32* v) const;
void Uniform4uiv(const char* name, const u32* v) const;
void Uniform2iv(const char* name, const s32* v) const;
void Uniform3iv(const char* name, const s32* v) const;
void Uniform4iv(const char* name, const s32* v) const;
void Uniform2fv(const char* name, const float* v) const;
void Uniform3fv(const char* name, const float* v) const;
void Uniform4fv(const char* name, const float* v) const;
void BindUniformBlock(const char* name, u32 index);
Program& operator=(const Program&) = delete;
Program& operator=(Program&& prog);
private:
static u32 s_last_program_id;
GLuint m_program_id = 0;
GLuint m_vertex_shader_id = 0;
GLuint m_fragment_shader_id = 0;
std::vector<GLint> m_uniform_locations;
};
} // namespace GL
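The two uniform-setter overload families above differ in lookup cost: RegisterUniform() caches the result of glGetUniformLocation() once (it must run after Link(), since locations only exist on a linked program), and the int-indexed setters then avoid the per-call string lookup the const char* overloads perform. A small sketch, with illustrative uniform names:

void SetupUniforms(GL::Program& prog, const float* transform4)
{
  // Query locations once, after the program has been linked.
  const int u_transform = prog.RegisterUniform("u_transform"); // illustrative name
  const int u_color = prog.RegisterUniform("u_color");         // illustrative name

  prog.Bind();
  prog.Uniform4fv(u_transform, transform4);        // index path: no string lookup per call
  prog.Uniform4f(u_color, 1.0f, 1.0f, 1.0f, 1.0f);
}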

View File

@ -1,344 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "shader_cache.h"
#include "../file_system.h"
#include "../log.h"
#include "../md5_digest.h"
#include "../path.h"
#include "../string_util.h"
Log_SetChannel(GL::ShaderCache);
#pragma pack(push, 1)
struct CacheIndexEntry
{
u64 vertex_source_hash_low;
u64 vertex_source_hash_high;
u32 vertex_source_length;
u64 geometry_source_hash_low;
u64 geometry_source_hash_high;
u32 geometry_source_length;
u64 fragment_source_hash_low;
u64 fragment_source_hash_high;
u32 fragment_source_length;
u32 file_offset;
u32 blob_size;
u32 blob_format;
};
#pragma pack(pop)
GL::ShaderCache::ShaderCache() = default;
GL::ShaderCache::~ShaderCache()
{
Close();
}
bool GL::ShaderCache::CacheIndexKey::operator==(const CacheIndexKey& key) const
{
return (
vertex_source_hash_low == key.vertex_source_hash_low && vertex_source_hash_high == key.vertex_source_hash_high &&
vertex_source_length == key.vertex_source_length && fragment_source_hash_low == key.fragment_source_hash_low &&
fragment_source_hash_high == key.fragment_source_hash_high && fragment_source_length == key.fragment_source_length);
}
bool GL::ShaderCache::CacheIndexKey::operator!=(const CacheIndexKey& key) const
{
return (
vertex_source_hash_low != key.vertex_source_hash_low || vertex_source_hash_high != key.vertex_source_hash_high ||
vertex_source_length != key.vertex_source_length || fragment_source_hash_low != key.fragment_source_hash_low ||
fragment_source_hash_high != key.fragment_source_hash_high || fragment_source_length != key.fragment_source_length);
}
void GL::ShaderCache::Open(bool is_gles, std::string_view base_path, u32 version)
{
m_base_path = base_path;
m_version = version;
m_program_binary_supported = is_gles || GLAD_GL_ARB_get_program_binary;
if (m_program_binary_supported)
{
// check that there's at least one format and the extension isn't being "faked"
GLint num_formats = 0;
glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &num_formats);
Log_InfoPrintf("%u program binary formats supported by driver", num_formats);
m_program_binary_supported = (num_formats > 0);
}
if (!m_program_binary_supported)
{
Log_WarningPrintf("Your GL driver does not support program binaries. Hopefully it has a built-in cache, otherwise "
"startup will be slow due to compiling shaders.");
return;
}
if (!base_path.empty())
{
const std::string index_filename = GetIndexFileName();
const std::string blob_filename = GetBlobFileName();
if (!ReadExisting(index_filename, blob_filename))
CreateNew(index_filename, blob_filename);
}
}
bool GL::ShaderCache::CreateNew(const std::string& index_filename, const std::string& blob_filename)
{
if (FileSystem::FileExists(index_filename.c_str()))
{
Log_WarningPrintf("Removing existing index file '%s'", index_filename.c_str());
FileSystem::DeleteFile(index_filename.c_str());
}
if (FileSystem::FileExists(blob_filename.c_str()))
{
Log_WarningPrintf("Removing existing blob file '%s'", blob_filename.c_str());
FileSystem::DeleteFile(blob_filename.c_str());
}
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "wb");
if (!m_index_file)
{
Log_ErrorPrintf("Failed to open index file '%s' for writing", index_filename.c_str());
return false;
}
const u32 index_version = FILE_VERSION;
if (std::fwrite(&index_version, sizeof(index_version), 1, m_index_file) != 1 ||
std::fwrite(&m_version, sizeof(m_version), 1, m_index_file) != 1)
{
Log_ErrorPrintf("Failed to write version to index file '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFile(index_filename.c_str());
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "w+b");
if (!m_blob_file)
{
Log_ErrorPrintf("Failed to open blob file '%s' for writing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFile(index_filename.c_str());
return false;
}
return true;
}
bool GL::ShaderCache::ReadExisting(const std::string& index_filename, const std::string& blob_filename)
{
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "r+b");
if (!m_index_file)
return false;
u32 file_version = 0;
u32 data_version = 0;
if (std::fread(&file_version, sizeof(file_version), 1, m_index_file) != 1 || file_version != FILE_VERSION ||
std::fread(&data_version, sizeof(data_version), 1, m_index_file) != 1 || data_version != m_version)
{
Log_ErrorPrintf("Bad file/data version in '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "a+b");
if (!m_blob_file)
{
Log_ErrorPrintf("Blob file '%s' is missing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
std::fseek(m_blob_file, 0, SEEK_END);
const u32 blob_file_size = static_cast<u32>(std::ftell(m_blob_file));
for (;;)
{
CacheIndexEntry entry;
if (std::fread(&entry, sizeof(entry), 1, m_index_file) != 1 ||
(entry.file_offset + entry.blob_size) > blob_file_size)
{
if (std::feof(m_index_file))
break;
Log_ErrorPrintf("Failed to read entry from '%s', corrupt file?", index_filename.c_str());
m_index.clear();
std::fclose(m_blob_file);
m_blob_file = nullptr;
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
const CacheIndexKey key{entry.vertex_source_hash_low, entry.vertex_source_hash_high,
entry.vertex_source_length, entry.fragment_source_hash_low,
entry.fragment_source_hash_high, entry.fragment_source_length};
const CacheIndexData data{entry.file_offset, entry.blob_size, entry.blob_format};
m_index.emplace(key, data);
}
Log_InfoPrintf("Read %zu entries from '%s'", m_index.size(), index_filename.c_str());
return true;
}
void GL::ShaderCache::Close()
{
m_index.clear();
if (m_index_file)
std::fclose(m_index_file);
if (m_blob_file)
std::fclose(m_blob_file);
}
bool GL::ShaderCache::Recreate()
{
Close();
const std::string index_filename = GetIndexFileName();
const std::string blob_filename = GetBlobFileName();
return CreateNew(index_filename, blob_filename);
}
GL::ShaderCache::CacheIndexKey GL::ShaderCache::GetCacheKey(const std::string_view& vertex_shader,
const std::string_view& fragment_shader)
{
union ShaderHash
{
struct
{
u64 low;
u64 high;
};
u8 bytes[16];
};
ShaderHash vertex_hash = {};
ShaderHash fragment_hash = {};
MD5Digest digest;
if (!vertex_shader.empty())
{
digest.Update(vertex_shader.data(), static_cast<u32>(vertex_shader.length()));
digest.Final(vertex_hash.bytes);
}
if (!fragment_shader.empty())
{
digest.Reset();
digest.Update(fragment_shader.data(), static_cast<u32>(fragment_shader.length()));
digest.Final(fragment_hash.bytes);
}
return CacheIndexKey{vertex_hash.low, vertex_hash.high, static_cast<u32>(vertex_shader.length()),
fragment_hash.low, fragment_hash.high, static_cast<u32>(fragment_shader.length())};
}
std::string GL::ShaderCache::GetIndexFileName() const
{
return Path::Combine(m_base_path, "gl_programs.idx");
}
std::string GL::ShaderCache::GetBlobFileName() const
{
return Path::Combine(m_base_path, "gl_programs.bin");
}
std::optional<GL::Program> GL::ShaderCache::GetProgram(const std::string_view vertex_shader,
const std::string_view fragment_shader,
const PreLinkCallback& callback)
{
if (!m_program_binary_supported || !m_blob_file)
return CompileProgram(vertex_shader, fragment_shader, callback, false);
const auto key = GetCacheKey(vertex_shader, fragment_shader);
auto iter = m_index.find(key);
if (iter == m_index.end())
return CompileAndAddProgram(key, vertex_shader, fragment_shader, callback);
std::vector<u8> data(iter->second.blob_size);
if (std::fseek(m_blob_file, iter->second.file_offset, SEEK_SET) != 0 ||
std::fread(data.data(), 1, iter->second.blob_size, m_blob_file) != iter->second.blob_size)
{
Log_ErrorPrintf("Read blob from file failed");
return {};
}
Program prog;
if (prog.CreateFromBinary(data.data(), static_cast<u32>(data.size()), iter->second.blob_format))
return std::optional<Program>(std::move(prog));
Log_WarningPrintf(
"Failed to create program from binary; this may be due to a driver or GPU change. Recreating cache.");
if (!Recreate())
return CompileProgram(vertex_shader, fragment_shader, callback, false);
else
return CompileAndAddProgram(key, vertex_shader, fragment_shader, callback);
}
std::optional<GL::Program> GL::ShaderCache::CompileProgram(const std::string_view& vertex_shader,
const std::string_view& fragment_shader,
const PreLinkCallback& callback, bool set_retrievable)
{
Program prog;
if (!prog.Compile(vertex_shader, fragment_shader))
return std::nullopt;
if (callback)
callback(prog);
if (set_retrievable)
prog.SetBinaryRetrievableHint();
if (!prog.Link())
return std::nullopt;
return std::optional<Program>(std::move(prog));
}
std::optional<GL::Program> GL::ShaderCache::CompileAndAddProgram(const CacheIndexKey& key,
const std::string_view& vertex_shader,
const std::string_view& fragment_shader,
const PreLinkCallback& callback)
{
std::optional<Program> prog = CompileProgram(vertex_shader, fragment_shader, callback, true);
if (!prog)
return std::nullopt;
std::vector<u8> prog_data;
u32 prog_format = 0;
if (!prog->GetBinary(&prog_data, &prog_format))
return std::nullopt;
if (!m_blob_file || std::fseek(m_blob_file, 0, SEEK_END) != 0)
return prog;
CacheIndexData data;
data.file_offset = static_cast<u32>(std::ftell(m_blob_file));
data.blob_size = static_cast<u32>(prog_data.size());
data.blob_format = prog_format;
CacheIndexEntry entry = {};
entry.vertex_source_hash_low = key.vertex_source_hash_low;
entry.vertex_source_hash_high = key.vertex_source_hash_high;
entry.vertex_source_length = key.vertex_source_length;
entry.fragment_source_hash_low = key.fragment_source_hash_low;
entry.fragment_source_hash_high = key.fragment_source_hash_high;
entry.fragment_source_length = key.fragment_source_length;
entry.file_offset = data.file_offset;
entry.blob_size = data.blob_size;
entry.blob_format = data.blob_format;
if (std::fwrite(prog_data.data(), 1, entry.blob_size, m_blob_file) != entry.blob_size ||
std::fflush(m_blob_file) != 0 || std::fwrite(&entry, sizeof(entry), 1, m_index_file) != 1 ||
std::fflush(m_index_file) != 0)
{
Log_ErrorPrintf("Failed to write shader blob to file");
return prog;
}
m_index.emplace(key, data);
return prog;
}
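A minimal usage sketch for the cache above, built only from the Open()/GetProgram() signatures in this file; the cache version constant and the pre-link callback body are illustrative. Bumping the version invalidates the on-disk index, since ReadExisting() rejects a mismatched data version.

#include "shader_cache.h"

std::optional<GL::Program> LoadDisplayProgram(GL::ShaderCache& cache, std::string_view base_path,
                                              std::string_view vs_source, std::string_view fs_source)
{
  static constexpr u32 SHADER_CACHE_VERSION = 1; // illustrative; bump when the generated GLSL changes
  cache.Open(/*is_gles=*/false, base_path, SHADER_CACHE_VERSION);

  // The pre-link callback runs between Compile() and Link(), e.g. for attribute bindings.
  return cache.GetProgram(vs_source, fs_source, [](GL::Program& prog) { prog.BindDefaultAttributes(); });
}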

View File

@ -1,91 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../hash_combine.h"
#include "../types.h"
#include "program.h"
#include <cstdio>
#include <functional>
#include <optional>
#include <string>
#include <string_view>
#include <unordered_map>
#include <vector>
namespace GL {
class ShaderCache
{
public:
using PreLinkCallback = std::function<void(Program&)>;
ShaderCache();
~ShaderCache();
void Open(bool is_gles, std::string_view base_path, u32 version);
std::optional<Program> GetProgram(const std::string_view vertex_shader, const std::string_view fragment_shader,
const PreLinkCallback& callback = {});
private:
static constexpr u32 FILE_VERSION = 4;
struct CacheIndexKey
{
u64 vertex_source_hash_low;
u64 vertex_source_hash_high;
u32 vertex_source_length;
u64 fragment_source_hash_low;
u64 fragment_source_hash_high;
u32 fragment_source_length;
bool operator==(const CacheIndexKey& key) const;
bool operator!=(const CacheIndexKey& key) const;
};
struct CacheIndexEntryHasher
{
std::size_t operator()(const CacheIndexKey& e) const noexcept
{
std::size_t h = 0;
hash_combine(h, e.vertex_source_hash_low, e.vertex_source_hash_high, e.vertex_source_length,
e.fragment_source_hash_low, e.fragment_source_hash_high, e.fragment_source_length);
return h;
}
};
struct CacheIndexData
{
u32 file_offset;
u32 blob_size;
u32 blob_format;
};
using CacheIndex = std::unordered_map<CacheIndexKey, CacheIndexData, CacheIndexEntryHasher>;
static CacheIndexKey GetCacheKey(const std::string_view& vertex_shader, const std::string_view& fragment_shader);
std::string GetIndexFileName() const;
std::string GetBlobFileName() const;
bool CreateNew(const std::string& index_filename, const std::string& blob_filename);
bool ReadExisting(const std::string& index_filename, const std::string& blob_filename);
void Close();
bool Recreate();
std::optional<Program> CompileProgram(const std::string_view& vertex_shader, const std::string_view& fragment_shader,
const PreLinkCallback& callback, bool set_retrievable);
std::optional<Program> CompileAndAddProgram(const CacheIndexKey& key, const std::string_view& vertex_shader,
const std::string_view& fragment_shader, const PreLinkCallback& callback);
std::string m_base_path;
std::FILE* m_index_file = nullptr;
std::FILE* m_blob_file = nullptr;
CacheIndex m_index;
u32 m_version = 0;
bool m_program_binary_supported = false;
};
} // namespace GL

View File

@ -1,379 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "texture.h"
#include "../assert.h"
#include "../log.h"
#include <array>
#include <limits>
#include <tuple>
Log_SetChannel(GL);
const std::tuple<GLenum, GLenum, GLenum>& GL::Texture::GetPixelFormatMapping(GPUTexture::Format format)
{
static constexpr std::array<std::tuple<GLenum, GLenum, GLenum>, static_cast<u32>(GPUTexture::Format::Count)> mapping =
{{
{}, // Unknown
{GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE}, // RGBA8
{GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE}, // BGRA8
{GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5}, // RGB565
{GL_RGB5_A1, GL_BGRA, GL_UNSIGNED_SHORT_1_5_5_5_REV}, // RGBA5551
{GL_R8, GL_RED, GL_UNSIGNED_BYTE}, // R8
{GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_SHORT}, // D16
}};
static constexpr std::array<std::tuple<GLenum, GLenum, GLenum>, static_cast<u32>(GPUTexture::Format::Count)>
mapping_gles2 = {{
{}, // Unknown
{GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE}, // RGBA8
{}, // BGRA8
{GL_RGB, GL_RGB, GL_UNSIGNED_SHORT_5_6_5}, // RGB565
{}, // RGBA5551
{}, // R8
{}, // D16
}};
if (!GLAD_GL_ES_VERSION_2_0 || GLAD_GL_ES_VERSION_3_0)
return mapping[static_cast<u32>(format)];
else
return mapping_gles2[static_cast<u32>(format)];
}
GL::Texture::Texture() = default;
GL::Texture::Texture(Texture&& moved) : m_id(moved.m_id), m_fbo_id(moved.m_fbo_id)
{
m_width = moved.m_width;
m_height = moved.m_height;
m_levels = moved.m_levels;
m_layers = moved.m_layers;
m_samples = moved.m_samples;
m_format = moved.m_format;
moved.m_id = 0;
moved.m_fbo_id = 0;
moved.ClearBaseProperties();
}
GL::Texture::~Texture()
{
Destroy();
}
bool GL::Texture::UseTextureStorage(bool multisampled)
{
return GLAD_GL_ARB_texture_storage || (multisampled ? GLAD_GL_ES_VERSION_3_1 : GLAD_GL_ES_VERSION_3_0);
}
bool GL::Texture::UseTextureStorage() const
{
return UseTextureStorage(IsMultisampled());
}
bool GL::Texture::Create(u32 width, u32 height, u32 layers, u32 levels, u32 samples, Format format,
const void* data /* = nullptr */, u32 data_pitch /* = 0 */, bool linear /* = true */,
bool wrap /* = true */)
{
glGetError();
if (width > MAX_WIDTH || height > MAX_HEIGHT || layers > MAX_LAYERS || levels > MAX_LEVELS || samples > MAX_SAMPLES)
{
Log_ErrorPrintf("Invalid dimensions: %ux%ux%u %u %u", width, height, layers, levels, samples);
return false;
}
if (samples > 1 && levels > 1)
{
Log_ErrorPrintf("Multisampled textures can't have mip levels");
return false;
}
if (layers > 1 && data)
{
Log_ErrorPrintf("Loading texture array data not currently supported");
return false;
}
const GLenum target = ((samples > 1) ? ((layers > 1) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D_MULTISAMPLE_ARRAY) :
((layers > 1) ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D));
const auto [gl_internal_format, gl_format, gl_type] = GetPixelFormatMapping(format);
GLuint id;
glGenTextures(1, &id);
glBindTexture(target, id);
if (samples > 1)
{
Assert(!data);
if (UseTextureStorage(true))
{
if (layers > 1)
glTexStorage3DMultisample(target, samples, gl_internal_format, width, height, layers, GL_FALSE);
else
glTexStorage2DMultisample(target, samples, gl_internal_format, width, height, GL_FALSE);
}
else
{
if (layers > 1)
glTexImage3DMultisample(target, samples, gl_internal_format, width, height, layers, GL_FALSE);
else
glTexImage2DMultisample(target, samples, gl_internal_format, width, height, GL_FALSE);
}
glTexParameteri(target, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(target, GL_TEXTURE_MAX_LEVEL, levels);
}
else
{
if (UseTextureStorage(false))
{
if (layers > 1)
glTexStorage3D(target, levels, gl_internal_format, width, height, layers);
else
glTexStorage2D(target, levels, gl_internal_format, width, height);
if (data)
{
// TODO: Fix data for mipmaps here.
if (layers > 1)
glTexSubImage3D(target, 0, 0, 0, 0, width, height, layers, gl_format, gl_type, data);
else
glTexSubImage2D(target, 0, 0, 0, width, height, gl_format, gl_type, data);
}
}
else
{
for (u32 i = 0; i < levels; i++)
{
// TODO: Fix data pointer here.
if (layers > 1)
glTexImage3D(target, i, gl_internal_format, width, height, layers, 0, gl_format, gl_type, data);
else
glTexImage2D(target, i, gl_internal_format, width, height, 0, gl_format, gl_type, data);
}
// This doesn't exist on GLES2.
if (!GLAD_GL_ES_VERSION_2_0 || GLAD_GL_ES_VERSION_3_0)
{
glTexParameteri(target, GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(target, GL_TEXTURE_MAX_LEVEL, levels);
}
}
glTexParameteri(target, GL_TEXTURE_MIN_FILTER, linear ? GL_LINEAR : GL_NEAREST);
glTexParameteri(target, GL_TEXTURE_MAG_FILTER, linear ? GL_LINEAR : GL_NEAREST);
glTexParameteri(target, GL_TEXTURE_WRAP_S, wrap ? GL_REPEAT : GL_CLAMP_TO_EDGE);
glTexParameteri(target, GL_TEXTURE_WRAP_T, wrap ? GL_REPEAT : GL_CLAMP_TO_EDGE);
if (layers > 1)
glTexParameteri(target, GL_TEXTURE_WRAP_R, wrap ? GL_REPEAT : GL_CLAMP_TO_EDGE);
}
GLenum error = glGetError();
if (error != GL_NO_ERROR)
{
Log_ErrorPrintf("Failed to create texture: 0x%X", error);
glDeleteTextures(1, &id);
return false;
}
if (IsValid())
Destroy();
m_id = id;
m_width = static_cast<u16>(width);
m_height = static_cast<u16>(height);
m_layers = static_cast<u8>(layers);
m_levels = static_cast<u8>(levels);
m_samples = static_cast<u8>(samples);
m_format = format;
return true;
}
void GL::Texture::Replace(u32 width, u32 height, GLenum internal_format, GLenum format, GLenum type, const void* data)
{
Assert(IsValid() && width < MAX_WIDTH && height < MAX_HEIGHT && m_layers == 1 && m_samples == 1 && m_levels == 1);
const bool size_changed = (width != m_width || height != m_height);
m_width = static_cast<u16>(width);
m_height = static_cast<u16>(height);
m_levels = 1;
const GLenum target = GetGLTarget();
glBindTexture(target, m_id);
if (UseTextureStorage())
{
if (size_changed)
{
if (m_layers > 1)
glTexStorage3D(target, m_levels, internal_format, m_width, m_height, m_layers);
else
glTexStorage2D(target, m_levels, internal_format, m_width, m_height);
}
glTexSubImage2D(target, 0, 0, 0, m_width, m_height, format, type, data);
}
else
{
glTexImage2D(target, 0, internal_format, width, height, 0, format, type, data);
}
}
void GL::Texture::ReplaceImage(u32 layer, u32 level, GLenum format, GLenum type, const void* data)
{
Assert(IsValid() && !IsMultisampled());
const GLenum target = GetGLTarget();
if (IsTextureArray())
glTexSubImage3D(target, level, 0, 0, layer, m_width, m_height, 1, format, type, data);
else
glTexSubImage2D(target, level, 0, 0, m_width, m_height, format, type, data);
}
void GL::Texture::ReplaceSubImage(u32 layer, u32 level, u32 x, u32 y, u32 width, u32 height, GLenum format, GLenum type,
const void* data)
{
Assert(IsValid() && !IsMultisampled());
const GLenum target = GetGLTarget();
if (IsTextureArray())
glTexSubImage3D(target, level, x, y, layer, width, height, 1, format, type, data);
else
glTexSubImage2D(target, level, x, y, width, height, format, type, data);
}
void GL::Texture::SetLinearFilter(bool enabled) const
{
Assert(!IsMultisampled());
Bind();
const GLenum target = GetGLTarget();
glTexParameteri(target, GL_TEXTURE_MIN_FILTER, enabled ? GL_LINEAR : GL_NEAREST);
glTexParameteri(target, GL_TEXTURE_MAG_FILTER, enabled ? GL_LINEAR : GL_NEAREST);
}
void GL::Texture::SetWrap(bool enabled) const
{
const GLenum target = GetGLTarget();
glTexParameteri(target, GL_TEXTURE_WRAP_S, enabled ? GL_REPEAT : GL_CLAMP_TO_EDGE);
glTexParameteri(target, GL_TEXTURE_WRAP_T, enabled ? GL_REPEAT : GL_CLAMP_TO_EDGE);
if (m_layers > 1)
glTexParameteri(target, GL_TEXTURE_WRAP_R, enabled ? GL_REPEAT : GL_CLAMP_TO_EDGE);
}
bool GL::Texture::CreateFramebuffer()
{
if (!IsValid())
return false;
glGetError();
GLuint fbo_id;
glGenFramebuffers(1, &fbo_id);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo_id);
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_id, 0);
if (glGetError() != GL_NO_ERROR || glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
glDeleteFramebuffers(1, &fbo_id);
return false;
}
if (m_fbo_id != 0)
glDeleteFramebuffers(1, &m_fbo_id);
m_fbo_id = fbo_id;
return true;
}
void GL::Texture::Destroy()
{
if (m_fbo_id != 0)
{
glDeleteFramebuffers(1, &m_fbo_id);
m_fbo_id = 0;
}
if (m_id != 0)
{
glDeleteTextures(1, &m_id);
m_id = 0;
}
ClearBaseProperties();
}
void GL::Texture::Bind() const
{
glBindTexture(GetGLTarget(), m_id);
}
void GL::Texture::BindFramebuffer(GLenum target /*= GL_DRAW_FRAMEBUFFER*/) const
{
DebugAssert(m_fbo_id != 0);
glBindFramebuffer(target, m_fbo_id);
}
void GL::Texture::Unbind() const
{
glBindTexture(GetGLTarget(), 0);
}
GL::Texture& GL::Texture::operator=(Texture&& moved)
{
Destroy();
m_id = moved.m_id;
m_fbo_id = moved.m_fbo_id;
m_width = moved.m_width;
m_height = moved.m_height;
m_layers = moved.m_layers;
m_levels = moved.m_levels;
m_samples = moved.m_samples;
m_format = moved.m_format;
moved.m_id = 0;
moved.m_fbo_id = 0;
moved.ClearBaseProperties();
return *this;
}
void GL::Texture::GetTextureSubImage(GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset,
GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type,
GLsizei bufSize, void* pixels)
{
if (GLAD_GL_VERSION_4_5 || GLAD_GL_ARB_get_texture_sub_image)
{
glGetTextureSubImage(texture, level, xoffset, yoffset, zoffset, width, height, depth, format, type, bufSize,
pixels);
return;
}
GLenum target = GL_READ_FRAMEBUFFER;
GLenum target_binding = GL_READ_FRAMEBUFFER_BINDING;
if (GLAD_GL_ES_VERSION_2_0 && !GLAD_GL_ES_VERSION_3_0)
{
// GLES2 doesn't have GL_READ_FRAMEBUFFER.
target = GL_FRAMEBUFFER;
target_binding = GL_FRAMEBUFFER_BINDING;
}
Assert(depth == 1);
GLuint old_read_fbo;
glGetIntegerv(target_binding, reinterpret_cast<GLint*>(&old_read_fbo));
GLuint temp_fbo;
glGenFramebuffers(1, &temp_fbo);
glBindFramebuffer(target, temp_fbo);
if (zoffset > 0 && (GLAD_GL_VERSION_3_0 || GLAD_GL_ES_VERSION_3_0))
glFramebufferTextureLayer(target, GL_COLOR_ATTACHMENT0, texture, level, zoffset);
else
glFramebufferTexture2D(target, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, level);
DebugAssert(glCheckFramebufferStatus(target) == GL_FRAMEBUFFER_COMPLETE);
glReadPixels(xoffset, yoffset, width, height, format, type, pixels);
glBindFramebuffer(target, old_read_fbo);
glDeleteFramebuffers(1, &temp_fbo);
}
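A minimal sketch of the render-target path through the class above, using only members defined in this file; the dimensions and format are illustrative.

#include "texture.h"

bool CreateRenderTarget(GL::Texture& tex, u32 width, u32 height)
{
  // Single layer/level/sample RGBA8 texture with linear filtering and no wrapping.
  if (!tex.Create(width, height, /*layers=*/1, /*levels=*/1, /*samples=*/1, GPUTexture::Format::RGBA8, nullptr, 0,
                  /*linear=*/true, /*wrap=*/false))
    return false;

  // Attach it to its own FBO so it can be bound as a draw target.
  if (!tex.CreateFramebuffer())
    return false;

  tex.BindFramebuffer(GL_DRAW_FRAMEBUFFER);
  return true;
}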

View File

@ -1,63 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../gpu_texture.h"
#include "loader.h"
#include <tuple>
namespace GL {
class Texture final : public GPUTexture
{
public:
Texture();
Texture(Texture&& moved);
~Texture();
static bool UseTextureStorage(bool multisampled);
static const std::tuple<GLenum, GLenum, GLenum>& GetPixelFormatMapping(Format format);
ALWAYS_INLINE GLuint GetGLId() const { return m_id; }
bool IsValid() const override { return m_id != 0; }
bool Create(u32 width, u32 height, u32 layers, u32 levels, u32 samples, Format format, const void* data = nullptr,
u32 data_pitch = 0, bool linear = true, bool wrap = true);
void Destroy();
void Replace(u32 width, u32 height, GLenum internal_format, GLenum format, GLenum type, const void* data);
void ReplaceImage(u32 layer, u32 level, GLenum format, GLenum type, const void* data);
void ReplaceSubImage(u32 layer, u32 level, u32 x, u32 y, u32 width, u32 height, GLenum format, GLenum type,
const void* data);
bool CreateFramebuffer();
bool UseTextureStorage() const;
void SetLinearFilter(bool enabled) const;
void SetWrap(bool enabled) const;
ALWAYS_INLINE GLuint GetGLFramebufferID() const { return m_fbo_id; }
ALWAYS_INLINE GLenum GetGLTarget() const
{
return (IsMultisampled() ? (IsTextureArray() ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D_MULTISAMPLE_ARRAY) :
(IsTextureArray() ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D));
}
void Bind() const;
void BindFramebuffer(GLenum target = GL_DRAW_FRAMEBUFFER) const;
void Unbind() const;
Texture& operator=(const Texture& copy) = delete;
Texture& operator=(Texture&& moved);
// Helper which uses glGetTextureSubImage where available, otherwise a temporary FBO.
static void GetTextureSubImage(GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset,
GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type,
GLsizei bufSize, void* pixels);
private:
GLuint m_id = 0;
GLuint m_fbo_id = 0;
};
} // namespace GL

View File

@ -1,104 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "x11_window.h"
#include "../assert.h"
#include "../log.h"
#include <cstdio>
Log_SetChannel(X11Window);
namespace GL {
X11Window::X11Window() = default;
X11Window::~X11Window()
{
Destroy();
}
bool X11Window::Create(Display* display, Window parent_window, const XVisualInfo* vi)
{
m_display = display;
m_parent_window = parent_window;
XSync(m_display, True);
XWindowAttributes parent_wa = {};
XGetWindowAttributes(m_display, m_parent_window, &parent_wa);
m_width = static_cast<u32>(parent_wa.width);
m_height = static_cast<u32>(parent_wa.height);
// Failed X calls terminate the process so no need to check for errors.
// We could swap the error handler out here as well.
m_colormap = XCreateColormap(m_display, m_parent_window, vi->visual, AllocNone);
XSetWindowAttributes wa = {};
wa.colormap = m_colormap;
m_window = XCreateWindow(m_display, m_parent_window, 0, 0, m_width, m_height, 0, vi->depth, InputOutput, vi->visual,
CWColormap, &wa);
XMapWindow(m_display, m_window);
XSync(m_display, True);
return true;
}
void X11Window::Destroy()
{
if (m_window)
{
XUnmapWindow(m_display, m_window);
XDestroyWindow(m_display, m_window);
m_window = {};
}
if (m_colormap)
{
XFreeColormap(m_display, m_colormap);
m_colormap = {};
}
}
void X11Window::Resize(u32 width, u32 height)
{
if (width != 0 && height != 0)
{
m_width = width;
m_height = height;
}
else
{
XWindowAttributes parent_wa = {};
XGetWindowAttributes(m_display, m_parent_window, &parent_wa);
m_width = static_cast<u32>(parent_wa.width);
m_height = static_cast<u32>(parent_wa.height);
}
XResizeWindow(m_display, m_window, m_width, m_height);
}
static X11InhibitErrors* s_current_error_inhibiter;
X11InhibitErrors::X11InhibitErrors()
{
Assert(!s_current_error_inhibiter);
m_old_handler = XSetErrorHandler(ErrorHandler);
s_current_error_inhibiter = this;
}
X11InhibitErrors::~X11InhibitErrors()
{
Assert(s_current_error_inhibiter == this);
s_current_error_inhibiter = nullptr;
XSetErrorHandler(m_old_handler);
}
int X11InhibitErrors::ErrorHandler(Display* display, XErrorEvent* ee)
{
char error_string[256] = {};
XGetErrorText(display, ee->error_code, error_string, sizeof(error_string));
Log_WarningPrintf("X11 Error: %s (Error %u Minor %u Request %u)", error_string, ee->error_code, ee->minor_code,
ee->request_code);
s_current_error_inhibiter->m_had_error = true;
return 0;
}
} // namespace GL
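A small sketch of the X11InhibitErrors pattern used throughout the GLX code above: scope the inhibiter around the X calls, flush with XSync() so any protocol errors are actually delivered, then check HadError() instead of letting the default handler kill the process.

#include "x11_window.h"

bool TryResize(Display* display, GL::X11Window& window, u32 width, u32 height)
{
  GL::X11InhibitErrors ie; // swaps in the logging error handler for this scope
  window.Resize(width, height);
  XSync(display, False);   // make sure any errors from the resize arrive before we check
  return !ie.HadError();
}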

View File

@ -1,51 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../types.h"
#include <X11/Xlib.h>
#include <X11/Xutil.h>
namespace GL {
class X11Window
{
public:
X11Window();
~X11Window();
ALWAYS_INLINE Window GetWindow() const { return m_window; }
ALWAYS_INLINE u32 GetWidth() const { return m_width; }
ALWAYS_INLINE u32 GetHeight() const { return m_height; }
bool Create(Display* display, Window parent_window, const XVisualInfo* vi);
void Destroy();
// Setting a width/height of 0 will use parent dimensions.
void Resize(u32 width = 0, u32 height = 0);
private:
Display* m_display = nullptr;
Window m_parent_window = {};
Window m_window = {};
Colormap m_colormap = {};
u32 m_width = 0;
u32 m_height = 0;
};
// Helper class for managing X errors
class X11InhibitErrors
{
public:
X11InhibitErrors();
~X11InhibitErrors();
ALWAYS_INLINE bool HadError() const { return m_had_error; }
private:
static int ErrorHandler(Display* display, XErrorEvent* ee);
XErrorHandler m_old_handler = {};
bool m_had_error = false;
};
} // namespace GL

View File

@ -1,71 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "types.h"
#include <algorithm>
#include <vector>
class GPUTexture
{
public:
enum : u32
{
MAX_WIDTH = 65535,
MAX_HEIGHT = 65535,
MAX_LAYERS = 255,
MAX_LEVELS = 255,
MAX_SAMPLES = 255,
};
enum class Format : u8
{
Unknown,
RGBA8,
BGRA8,
RGB565,
RGBA5551,
R8,
D16,
Count
};
public:
virtual ~GPUTexture();
ALWAYS_INLINE u32 GetWidth() const { return m_width; }
ALWAYS_INLINE u32 GetHeight() const { return m_height; }
ALWAYS_INLINE u32 GetLayers() const { return m_layers; }
ALWAYS_INLINE u32 GetLevels() const { return m_levels; }
ALWAYS_INLINE u32 GetSamples() const { return m_samples; }
ALWAYS_INLINE GPUTexture::Format GetFormat() const { return m_format; }
ALWAYS_INLINE bool IsTextureArray() const { return m_layers > 1; }
ALWAYS_INLINE bool IsMultisampled() const { return m_samples > 1; }
ALWAYS_INLINE u32 GetPixelSize() const { return GetPixelSize(m_format); }
ALWAYS_INLINE u32 GetMipWidth(u32 level) const { return std::max<u32>(m_width >> level, 1u); }
ALWAYS_INLINE u32 GetMipHeight(u32 level) const { return std::max<u32>(m_height >> level, 1u); }
virtual bool IsValid() const = 0;
static u32 GetPixelSize(GPUTexture::Format format);
static bool IsDepthFormat(GPUTexture::Format format);
static bool ConvertTextureDataToRGBA8(u32 width, u32 height, std::vector<u32>& texture_data, u32& texture_data_stride,
GPUTexture::Format format);
static void FlipTextureDataRGBA8(u32 width, u32 height, std::vector<u32>& texture_data, u32 texture_data_stride);
protected:
GPUTexture();
GPUTexture(u16 width, u16 height, u8 layers, u8 levels, u8 samples, Format format);
void ClearBaseProperties();
u16 m_width = 0;
u16 m_height = 0;
u8 m_layers = 0;
u8 m_levels = 0;
u8 m_samples = 0;
Format m_format = Format::Unknown;
};
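As a small worked example of the mip helpers above: the upload size of one mip level is just the clamped mip dimensions times the per-pixel size for the format, assuming tightly packed data.

#include "gpu_texture.h"

u32 CalcMipUploadSize(const GPUTexture& tex, u32 level)
{
  const u32 width = tex.GetMipWidth(level);   // max(m_width >> level, 1)
  const u32 height = tex.GetMipHeight(level); // max(m_height >> level, 1)
  return width * height * GPUTexture::GetPixelSize(tex.GetFormat());
}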

View File

@ -10,8 +10,6 @@
#include <algorithm>
Log_SetChannel(HTTPDownloaderWinHttp);
#pragma comment(lib, "winhttp.lib")
namespace Common {
HTTPDownloaderWinHttp::HTTPDownloaderWinHttp() : HTTPDownloader() {}

View File

@ -1,761 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "builders.h"
#include "../assert.h"
#include "util.h"
namespace Vulkan {
DescriptorSetLayoutBuilder::DescriptorSetLayoutBuilder()
{
Clear();
}
void DescriptorSetLayoutBuilder::Clear()
{
m_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
m_ci.pNext = nullptr;
m_ci.flags = 0;
m_ci.pBindings = nullptr;
m_ci.bindingCount = 0;
}
VkDescriptorSetLayout DescriptorSetLayoutBuilder::Create(VkDevice device)
{
VkDescriptorSetLayout layout;
VkResult res = vkCreateDescriptorSetLayout(device, &m_ci, nullptr, &layout);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateDescriptorSetLayout() failed: ");
return VK_NULL_HANDLE;
}
Clear();
return layout;
}
void DescriptorSetLayoutBuilder::AddBinding(u32 binding, VkDescriptorType dtype, u32 dcount, VkShaderStageFlags stages)
{
Assert(m_ci.bindingCount < MAX_BINDINGS);
VkDescriptorSetLayoutBinding& b = m_bindings[m_ci.bindingCount];
b.binding = binding;
b.descriptorType = dtype;
b.descriptorCount = dcount;
b.stageFlags = stages;
b.pImmutableSamplers = nullptr;
m_ci.pBindings = m_bindings.data();
m_ci.bindingCount++;
}
PipelineLayoutBuilder::PipelineLayoutBuilder()
{
Clear();
}
void PipelineLayoutBuilder::Clear()
{
m_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
m_ci.pNext = nullptr;
m_ci.flags = 0;
m_ci.pSetLayouts = nullptr;
m_ci.setLayoutCount = 0;
m_ci.pPushConstantRanges = nullptr;
m_ci.pushConstantRangeCount = 0;
}
VkPipelineLayout PipelineLayoutBuilder::Create(VkDevice device)
{
VkPipelineLayout layout;
VkResult res = vkCreatePipelineLayout(device, &m_ci, nullptr, &layout);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineLayout() failed: ");
return VK_NULL_HANDLE;
}
Clear();
return layout;
}
void PipelineLayoutBuilder::AddDescriptorSet(VkDescriptorSetLayout layout)
{
Assert(m_ci.setLayoutCount < MAX_SETS);
m_sets[m_ci.setLayoutCount] = layout;
m_ci.setLayoutCount++;
m_ci.pSetLayouts = m_sets.data();
}
void PipelineLayoutBuilder::AddPushConstants(VkShaderStageFlags stages, u32 offset, u32 size)
{
Assert(m_ci.pushConstantRangeCount < MAX_PUSH_CONSTANTS);
VkPushConstantRange& r = m_push_constants[m_ci.pushConstantRangeCount];
r.stageFlags = stages;
r.offset = offset;
r.size = size;
m_ci.pushConstantRangeCount++;
m_ci.pPushConstantRanges = m_push_constants.data();
}
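// Illustrative sketch (not used elsewhere in this file): how the two builders above are typically
// chained, assuming a valid VkDevice; the binding and push-constant values are examples only.
static VkPipelineLayout CreateExamplePipelineLayout(VkDevice device)
{
  DescriptorSetLayoutBuilder dslb;
  dslb.AddBinding(0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_VERTEX_BIT);
  const VkDescriptorSetLayout set_layout = dslb.Create(device);
  if (set_layout == VK_NULL_HANDLE)
    return VK_NULL_HANDLE;

  PipelineLayoutBuilder plb;
  plb.AddDescriptorSet(set_layout);
  plb.AddPushConstants(VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(float) * 4);
  return plb.Create(device);
}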
GraphicsPipelineBuilder::GraphicsPipelineBuilder()
{
Clear();
}
void GraphicsPipelineBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
m_shader_stages = {};
m_vertex_input_state = {};
m_vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
m_ci.pVertexInputState = &m_vertex_input_state;
m_vertex_attributes = {};
m_vertex_buffers = {};
m_input_assembly = {};
m_input_assembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
m_rasterization_state = {};
m_rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
m_rasterization_state.lineWidth = 1.0f;
m_depth_state = {};
m_depth_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
m_blend_state = {};
m_blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
m_blend_attachments = {};
m_viewport_state = {};
m_viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
m_viewport = {};
m_scissor = {};
m_dynamic_state = {};
m_dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
m_dynamic_state_values = {};
m_multisample_state = {};
m_multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
// set defaults
SetNoCullRasterizationState();
SetNoDepthTestState();
SetNoBlendingState();
SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
// have to be specified even if dynamic
SetViewport(0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f);
SetScissorRect(0, 0, 1, 1);
SetMultisamples(VK_SAMPLE_COUNT_1_BIT);
}
VkPipeline GraphicsPipelineBuilder::Create(VkDevice device, VkPipelineCache pipeline_cache, bool clear /* = true */)
{
VkPipeline pipeline;
VkResult res = vkCreateGraphicsPipelines(device, pipeline_cache, 1, &m_ci, nullptr, &pipeline);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateGraphicsPipelines() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return pipeline;
}
void GraphicsPipelineBuilder::SetShaderStage(VkShaderStageFlagBits stage, VkShaderModule module,
const char* entry_point)
{
Assert(m_ci.stageCount < MAX_SHADER_STAGES);
u32 index = 0;
for (; index < m_ci.stageCount; index++)
{
if (m_shader_stages[index].stage == stage)
break;
}
if (index == m_ci.stageCount)
{
m_ci.stageCount++;
m_ci.pStages = m_shader_stages.data();
}
VkPipelineShaderStageCreateInfo& s = m_shader_stages[index];
s.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
s.stage = stage;
s.module = module;
s.pName = entry_point;
}
void GraphicsPipelineBuilder::AddVertexBuffer(u32 binding, u32 stride,
VkVertexInputRate input_rate /*= VK_VERTEX_INPUT_RATE_VERTEX*/)
{
Assert(m_vertex_input_state.vertexBindingDescriptionCount < MAX_VERTEX_BUFFERS);
VkVertexInputBindingDescription& b = m_vertex_buffers[m_vertex_input_state.vertexBindingDescriptionCount];
b.binding = binding;
b.stride = stride;
b.inputRate = input_rate;
m_vertex_input_state.vertexBindingDescriptionCount++;
m_vertex_input_state.pVertexBindingDescriptions = m_vertex_buffers.data();
m_ci.pVertexInputState = &m_vertex_input_state;
}
void GraphicsPipelineBuilder::AddVertexAttribute(u32 location, u32 binding, VkFormat format, u32 offset)
{
Assert(m_vertex_input_state.vertexAttributeDescriptionCount < MAX_VERTEX_ATTRIBUTES);
VkVertexInputAttributeDescription& a = m_vertex_attributes[m_vertex_input_state.vertexAttributeDescriptionCount];
a.location = location;
a.binding = binding;
a.format = format;
a.offset = offset;
m_vertex_input_state.vertexAttributeDescriptionCount++;
m_vertex_input_state.pVertexAttributeDescriptions = m_vertex_attributes.data();
m_ci.pVertexInputState = &m_vertex_input_state;
}
void GraphicsPipelineBuilder::SetPrimitiveTopology(VkPrimitiveTopology topology,
bool enable_primitive_restart /*= false*/)
{
m_input_assembly.topology = topology;
m_input_assembly.primitiveRestartEnable = enable_primitive_restart;
m_ci.pInputAssemblyState = &m_input_assembly;
}
void GraphicsPipelineBuilder::SetRasterizationState(VkPolygonMode polygon_mode, VkCullModeFlags cull_mode,
VkFrontFace front_face)
{
m_rasterization_state.polygonMode = polygon_mode;
m_rasterization_state.cullMode = cull_mode;
m_rasterization_state.frontFace = front_face;
m_ci.pRasterizationState = &m_rasterization_state;
}
void GraphicsPipelineBuilder::SetLineWidth(float width)
{
m_rasterization_state.lineWidth = width;
}
void GraphicsPipelineBuilder::SetMultisamples(u32 multisamples, bool per_sample_shading)
{
m_multisample_state.rasterizationSamples = static_cast<VkSampleCountFlagBits>(multisamples);
m_multisample_state.sampleShadingEnable = per_sample_shading;
m_multisample_state.minSampleShading = (multisamples > 1) ? 1.0f : 0.0f;
m_ci.pMultisampleState = &m_multisample_state;
}
void GraphicsPipelineBuilder::SetNoCullRasterizationState()
{
SetRasterizationState(VK_POLYGON_MODE_FILL, VK_CULL_MODE_NONE, VK_FRONT_FACE_CLOCKWISE);
}
void GraphicsPipelineBuilder::SetDepthState(bool depth_test, bool depth_write, VkCompareOp compare_op)
{
m_depth_state.depthTestEnable = depth_test;
m_depth_state.depthWriteEnable = depth_write;
m_depth_state.depthCompareOp = compare_op;
m_ci.pDepthStencilState = &m_depth_state;
}
void GraphicsPipelineBuilder::SetNoDepthTestState()
{
SetDepthState(false, false, VK_COMPARE_OP_ALWAYS);
}
void GraphicsPipelineBuilder::SetBlendConstants(float r, float g, float b, float a)
{
m_blend_state.blendConstants[0] = r;
m_blend_state.blendConstants[1] = g;
m_blend_state.blendConstants[2] = b;
m_blend_state.blendConstants[3] = a;
m_ci.pColorBlendState = &m_blend_state;
}
void GraphicsPipelineBuilder::AddBlendAttachment(
bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor, VkBlendOp op,
VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor, VkBlendOp alpha_op, VkColorComponentFlags write_mask /* = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT */)
{
Assert(m_blend_state.attachmentCount < MAX_ATTACHMENTS);
VkPipelineColorBlendAttachmentState& bs = m_blend_attachments[m_blend_state.attachmentCount];
bs.blendEnable = blend_enable;
bs.srcColorBlendFactor = src_factor;
bs.dstColorBlendFactor = dst_factor;
bs.colorBlendOp = op;
bs.srcAlphaBlendFactor = alpha_src_factor;
bs.dstAlphaBlendFactor = alpha_dst_factor;
bs.alphaBlendOp = alpha_op;
bs.colorWriteMask = write_mask;
m_blend_state.attachmentCount++;
m_blend_state.pAttachments = m_blend_attachments.data();
m_ci.pColorBlendState = &m_blend_state;
}
void GraphicsPipelineBuilder::SetBlendAttachment(
u32 attachment, bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor, VkBlendOp op,
VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor, VkBlendOp alpha_op, VkColorComponentFlags write_mask /*= VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT*/)
{
Assert(attachment < MAX_ATTACHMENTS);
VkPipelineColorBlendAttachmentState& bs = m_blend_attachments[attachment];
bs.blendEnable = blend_enable;
bs.srcColorBlendFactor = src_factor;
bs.dstColorBlendFactor = dst_factor;
bs.colorBlendOp = op;
bs.srcAlphaBlendFactor = alpha_src_factor;
bs.dstAlphaBlendFactor = alpha_dst_factor;
bs.alphaBlendOp = alpha_op;
bs.colorWriteMask = write_mask;
if (attachment >= m_blend_state.attachmentCount)
{
m_blend_state.attachmentCount = attachment + 1u;
m_blend_state.pAttachments = m_blend_attachments.data();
m_ci.pColorBlendState = &m_blend_state;
}
}
void GraphicsPipelineBuilder::ClearBlendAttachments()
{
m_blend_attachments = {};
m_blend_state.attachmentCount = 0;
}
void GraphicsPipelineBuilder::SetNoBlendingState()
{
ClearBlendAttachments();
SetBlendAttachment(0, false, VK_BLEND_FACTOR_ONE, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD, VK_BLEND_FACTOR_ONE,
VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD,
VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT |
VK_COLOR_COMPONENT_A_BIT);
}
void GraphicsPipelineBuilder::AddDynamicState(VkDynamicState state)
{
Assert(m_dynamic_state.dynamicStateCount < MAX_DYNAMIC_STATE);
m_dynamic_state_values[m_dynamic_state.dynamicStateCount] = state;
m_dynamic_state.dynamicStateCount++;
m_dynamic_state.pDynamicStates = m_dynamic_state_values.data();
m_ci.pDynamicState = &m_dynamic_state;
}
void GraphicsPipelineBuilder::SetDynamicViewportAndScissorState()
{
AddDynamicState(VK_DYNAMIC_STATE_VIEWPORT);
AddDynamicState(VK_DYNAMIC_STATE_SCISSOR);
}
void GraphicsPipelineBuilder::SetViewport(float x, float y, float width, float height, float min_depth, float max_depth)
{
m_viewport.x = x;
m_viewport.y = y;
m_viewport.width = width;
m_viewport.height = height;
m_viewport.minDepth = min_depth;
m_viewport.maxDepth = max_depth;
m_viewport_state.pViewports = &m_viewport;
m_viewport_state.viewportCount = 1u;
m_ci.pViewportState = &m_viewport_state;
}
void GraphicsPipelineBuilder::SetScissorRect(s32 x, s32 y, u32 width, u32 height)
{
m_scissor.offset.x = x;
m_scissor.offset.y = y;
m_scissor.extent.width = width;
m_scissor.extent.height = height;
m_viewport_state.pScissors = &m_scissor;
m_viewport_state.scissorCount = 1u;
m_ci.pViewportState = &m_viewport_state;
}
void GraphicsPipelineBuilder::SetMultisamples(VkSampleCountFlagBits samples)
{
m_multisample_state.rasterizationSamples = samples;
m_ci.pMultisampleState = &m_multisample_state;
}
void GraphicsPipelineBuilder::SetPipelineLayout(VkPipelineLayout layout)
{
m_ci.layout = layout;
}
void GraphicsPipelineBuilder::SetRenderPass(VkRenderPass render_pass, u32 subpass)
{
m_ci.renderPass = render_pass;
m_ci.subpass = subpass;
}
SamplerBuilder::SamplerBuilder()
{
Clear();
}
void SamplerBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
}
VkSampler SamplerBuilder::Create(VkDevice device, bool clear /* = true */)
{
VkSampler sampler;
VkResult res = vkCreateSampler(device, &m_ci, nullptr, &sampler);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSampler() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return sampler;
}
void SamplerBuilder::SetFilter(VkFilter mag_filter, VkFilter min_filter, VkSamplerMipmapMode mip_filter)
{
m_ci.magFilter = mag_filter;
m_ci.minFilter = min_filter;
m_ci.mipmapMode = mip_filter;
}
void SamplerBuilder::SetAddressMode(VkSamplerAddressMode u, VkSamplerAddressMode v, VkSamplerAddressMode w)
{
m_ci.addressModeU = u;
m_ci.addressModeV = v;
m_ci.addressModeW = w;
}
void SamplerBuilder::SetBorderColor(VkBorderColor color)
{
m_ci.borderColor = color;
}
void SamplerBuilder::SetPointSampler(VkSamplerAddressMode address_mode /* = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE */)
{
Clear();
SetFilter(VK_FILTER_NEAREST, VK_FILTER_NEAREST, VK_SAMPLER_MIPMAP_MODE_NEAREST);
SetAddressMode(address_mode, address_mode, address_mode);
}
void SamplerBuilder::SetLinearSampler(bool mipmaps,
VkSamplerAddressMode address_mode /* = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE */)
{
Clear();
SetFilter(VK_FILTER_LINEAR, VK_FILTER_LINEAR,
mipmaps ? VK_SAMPLER_MIPMAP_MODE_LINEAR : VK_SAMPLER_MIPMAP_MODE_NEAREST);
SetAddressMode(address_mode, address_mode, address_mode);
if (mipmaps)
{
m_ci.minLod = std::numeric_limits<float>::min();
m_ci.maxLod = std::numeric_limits<float>::max();
}
}
DescriptorSetUpdateBuilder::DescriptorSetUpdateBuilder()
{
Clear();
}
void DescriptorSetUpdateBuilder::Clear()
{
m_writes = {};
m_num_writes = 0;
}
void DescriptorSetUpdateBuilder::Update(VkDevice device, bool clear /*= true*/)
{
Assert(m_num_writes > 0);
vkUpdateDescriptorSets(device, m_num_writes, (m_num_writes > 0) ? m_writes.data() : nullptr, 0, nullptr);
if (clear)
Clear();
}
void DescriptorSetUpdateBuilder::AddImageDescriptorWrite(
VkDescriptorSet set, u32 binding, VkImageView view,
VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
{
Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
VkDescriptorImageInfo& ii = m_infos[m_num_infos++].image;
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = VK_NULL_HANDLE;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
dw.pImageInfo = &ii;
}
void DescriptorSetUpdateBuilder::AddSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkSampler sampler)
{
Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
VkDescriptorImageInfo& ii = m_infos[m_num_infos++].image;
ii.imageView = VK_NULL_HANDLE;
ii.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED;
ii.sampler = sampler;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dw.pImageInfo = &ii;
}
void DescriptorSetUpdateBuilder::AddCombinedImageSamplerDescriptorWrite(
VkDescriptorSet set, u32 binding, VkImageView view, VkSampler sampler,
VkImageLayout layout /*= VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL*/)
{
Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
VkDescriptorImageInfo& ii = m_infos[m_num_infos++].image;
ii.imageView = view;
ii.imageLayout = layout;
ii.sampler = sampler;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dw.pImageInfo = &ii;
}
void DescriptorSetUpdateBuilder::AddBufferDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype,
VkBuffer buffer, u32 offset, u32 size)
{
Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
VkDescriptorBufferInfo& bi = m_infos[m_num_infos++].buffer;
bi.buffer = buffer;
bi.offset = offset;
bi.range = size;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = dtype;
dw.pBufferInfo = &bi;
}
void DescriptorSetUpdateBuilder::AddBufferViewDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype,
VkBufferView view)
{
Assert(m_num_writes < MAX_WRITES && m_num_infos < MAX_INFOS);
VkBufferView& bi = m_infos[m_num_infos++].buffer_view;
bi = view;
VkWriteDescriptorSet& dw = m_writes[m_num_writes++];
dw.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
dw.dstSet = set;
dw.dstBinding = binding;
dw.descriptorCount = 1;
dw.descriptorType = dtype;
dw.pTexelBufferView = &bi;
}
FramebufferBuilder::FramebufferBuilder()
{
Clear();
}
void FramebufferBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
m_images = {};
}
VkFramebuffer FramebufferBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkFramebuffer fb;
VkResult res = vkCreateFramebuffer(device, &m_ci, nullptr, &fb);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return fb;
}
void FramebufferBuilder::AddAttachment(VkImageView image)
{
Assert(m_ci.attachmentCount < MAX_ATTACHMENTS);
m_images[m_ci.attachmentCount] = image;
m_ci.attachmentCount++;
m_ci.pAttachments = m_images.data();
}
void FramebufferBuilder::SetSize(u32 width, u32 height, u32 layers)
{
m_ci.width = width;
m_ci.height = height;
m_ci.layers = layers;
}
void FramebufferBuilder::SetRenderPass(VkRenderPass render_pass)
{
m_ci.renderPass = render_pass;
}
RenderPassBuilder::RenderPassBuilder()
{
Clear();
}
void RenderPassBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
m_attachments = {};
m_attachment_references = {};
m_num_attachment_references = 0;
m_subpasses = {};
}
VkRenderPass RenderPassBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkRenderPass rp;
VkResult res = vkCreateRenderPass(device, &m_ci, nullptr, &rp);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateRenderPass() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return rp;
}
u32 RenderPassBuilder::AddAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load_op,
VkAttachmentStoreOp store_op, VkImageLayout initial_layout,
VkImageLayout final_layout)
{
Assert(m_ci.attachmentCount < MAX_ATTACHMENTS);
const u32 index = m_ci.attachmentCount;
VkAttachmentDescription& ad = m_attachments[index];
ad.format = format;
ad.samples = samples;
ad.loadOp = load_op;
ad.storeOp = store_op;
ad.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
ad.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
ad.initialLayout = initial_layout;
ad.finalLayout = final_layout;
m_ci.attachmentCount++;
m_ci.pAttachments = m_attachments.data();
return index;
}
u32 RenderPassBuilder::AddSubpass()
{
Assert(m_ci.subpassCount < MAX_SUBPASSES);
const u32 index = m_ci.subpassCount;
VkSubpassDescription& sp = m_subpasses[index];
sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
m_ci.subpassCount++;
m_ci.pSubpasses = m_subpasses.data();
return index;
}
void RenderPassBuilder::AddSubpassColorAttachment(u32 subpass, u32 attachment, VkImageLayout layout)
{
Assert(subpass < m_ci.subpassCount && m_num_attachment_references < MAX_ATTACHMENT_REFERENCES);
VkAttachmentReference& ar = m_attachment_references[m_num_attachment_references++];
ar.attachment = attachment;
ar.layout = layout;
VkSubpassDescription& sp = m_subpasses[subpass];
if (sp.colorAttachmentCount == 0)
sp.pColorAttachments = &ar;
sp.colorAttachmentCount++;
}
void RenderPassBuilder::AddSubpassDepthAttachment(u32 subpass, u32 attachment, VkImageLayout layout)
{
Assert(subpass < m_ci.subpassCount && m_num_attachment_references < MAX_ATTACHMENT_REFERENCES);
VkAttachmentReference& ar = m_attachment_references[m_num_attachment_references++];
ar.attachment = attachment;
ar.layout = layout;
VkSubpassDescription& sp = m_subpasses[subpass];
sp.pDepthStencilAttachment = &ar;
}
BufferViewBuilder::BufferViewBuilder()
{
Clear();
}
void BufferViewBuilder::Clear()
{
m_ci = {};
m_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
}
VkBufferView BufferViewBuilder::Create(VkDevice device, bool clear /*= true*/)
{
VkBufferView bv;
VkResult res = vkCreateBufferView(device, &m_ci, nullptr, &bv);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateBufferView() failed: ");
return VK_NULL_HANDLE;
}
if (clear)
Clear();
return bv;
}
void BufferViewBuilder::Set(VkBuffer buffer, VkFormat format, u32 offset, u32 size)
{
m_ci.buffer = buffer;
m_ci.format = format;
m_ci.offset = offset;
m_ci.range = size;
}
} // namespace Vulkan
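For orientation, here is a minimal usage sketch of the builders implemented above. It assumes a valid VkDevice plus an already-allocated VkDescriptorSet, VkImageView and VkSampler; all variable names are illustrative and not part of the original sources.
// Sketch only: device, descriptor_set, image_view and sampler are assumed to exist elsewhere.
Vulkan::DescriptorSetLayoutBuilder dslb;
dslb.AddBinding(0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT);
const VkDescriptorSetLayout set_layout = dslb.Create(device);
Vulkan::PipelineLayoutBuilder plb;
plb.AddDescriptorSet(set_layout);
plb.AddPushConstants(VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(float) * 4);
const VkPipelineLayout pipeline_layout = plb.Create(device);
Vulkan::DescriptorSetUpdateBuilder dsub;
dsub.AddCombinedImageSamplerDescriptorWrite(descriptor_set, 0, image_view, sampler);
dsub.Update(device); // issues vkUpdateDescriptorSets and clears the builder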

View File

@ -1,274 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../types.h"
#include "loader.h"
#include <array>
namespace Vulkan {
class DescriptorSetLayoutBuilder
{
public:
enum : u32
{
MAX_BINDINGS = 16,
};
DescriptorSetLayoutBuilder();
void Clear();
VkDescriptorSetLayout Create(VkDevice device);
void AddBinding(u32 binding, VkDescriptorType dtype, u32 dcount, VkShaderStageFlags stages);
private:
VkDescriptorSetLayoutCreateInfo m_ci{};
std::array<VkDescriptorSetLayoutBinding, MAX_BINDINGS> m_bindings{};
};
class PipelineLayoutBuilder
{
public:
enum : u32
{
MAX_SETS = 8,
MAX_PUSH_CONSTANTS = 1
};
PipelineLayoutBuilder();
void Clear();
VkPipelineLayout Create(VkDevice device);
void AddDescriptorSet(VkDescriptorSetLayout layout);
void AddPushConstants(VkShaderStageFlags stages, u32 offset, u32 size);
private:
VkPipelineLayoutCreateInfo m_ci{};
std::array<VkDescriptorSetLayout, MAX_SETS> m_sets{};
std::array<VkPushConstantRange, MAX_PUSH_CONSTANTS> m_push_constants{};
};
class GraphicsPipelineBuilder
{
public:
enum : u32
{
MAX_SHADER_STAGES = 3,
MAX_VERTEX_ATTRIBUTES = 16,
MAX_VERTEX_BUFFERS = 8,
MAX_ATTACHMENTS = 2,
MAX_DYNAMIC_STATE = 8
};
GraphicsPipelineBuilder();
void Clear();
VkPipeline Create(VkDevice device, VkPipelineCache pipeline_cache = VK_NULL_HANDLE, bool clear = true);
void SetShaderStage(VkShaderStageFlagBits stage, VkShaderModule module, const char* entry_point);
void SetVertexShader(VkShaderModule module) { SetShaderStage(VK_SHADER_STAGE_VERTEX_BIT, module, "main"); }
void SetGeometryShader(VkShaderModule module) { SetShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, module, "main"); }
void SetFragmentShader(VkShaderModule module) { SetShaderStage(VK_SHADER_STAGE_FRAGMENT_BIT, module, "main"); }
void AddVertexBuffer(u32 binding, u32 stride, VkVertexInputRate input_rate = VK_VERTEX_INPUT_RATE_VERTEX);
void AddVertexAttribute(u32 location, u32 binding, VkFormat format, u32 offset);
void SetPrimitiveTopology(VkPrimitiveTopology topology, bool enable_primitive_restart = false);
void SetRasterizationState(VkPolygonMode polygon_mode, VkCullModeFlags cull_mode, VkFrontFace front_face);
void SetLineWidth(float width);
void SetMultisamples(u32 multisamples, bool per_sample_shading);
void SetNoCullRasterizationState();
void SetDepthState(bool depth_test, bool depth_write, VkCompareOp compare_op);
void SetNoDepthTestState();
void AddBlendAttachment(bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor, VkBlendOp op,
VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor, VkBlendOp alpha_op,
VkColorComponentFlags write_mask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT);
void SetBlendAttachment(u32 attachment, bool blend_enable, VkBlendFactor src_factor, VkBlendFactor dst_factor,
VkBlendOp op, VkBlendFactor alpha_src_factor, VkBlendFactor alpha_dst_factor,
VkBlendOp alpha_op,
VkColorComponentFlags write_mask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT);
void ClearBlendAttachments();
void SetBlendConstants(float r, float g, float b, float a);
void SetNoBlendingState();
void AddDynamicState(VkDynamicState state);
void SetDynamicViewportAndScissorState();
void SetViewport(float x, float y, float width, float height, float min_depth, float max_depth);
void SetScissorRect(s32 x, s32 y, u32 width, u32 height);
void SetMultisamples(VkSampleCountFlagBits samples);
void SetPipelineLayout(VkPipelineLayout layout);
void SetRenderPass(VkRenderPass render_pass, u32 subpass);
private:
VkGraphicsPipelineCreateInfo m_ci;
std::array<VkPipelineShaderStageCreateInfo, MAX_SHADER_STAGES> m_shader_stages;
VkPipelineVertexInputStateCreateInfo m_vertex_input_state;
std::array<VkVertexInputBindingDescription, MAX_VERTEX_BUFFERS> m_vertex_buffers;
std::array<VkVertexInputAttributeDescription, MAX_VERTEX_ATTRIBUTES> m_vertex_attributes;
VkPipelineInputAssemblyStateCreateInfo m_input_assembly;
VkPipelineRasterizationStateCreateInfo m_rasterization_state;
VkPipelineDepthStencilStateCreateInfo m_depth_state;
VkPipelineColorBlendStateCreateInfo m_blend_state;
std::array<VkPipelineColorBlendAttachmentState, MAX_ATTACHMENTS> m_blend_attachments;
VkPipelineViewportStateCreateInfo m_viewport_state;
VkViewport m_viewport;
VkRect2D m_scissor;
VkPipelineDynamicStateCreateInfo m_dynamic_state;
std::array<VkDynamicState, MAX_DYNAMIC_STATE> m_dynamic_state_values;
VkPipelineMultisampleStateCreateInfo m_multisample_state;
};
class SamplerBuilder
{
public:
SamplerBuilder();
void Clear();
VkSampler Create(VkDevice device, bool clear = true);
void SetFilter(VkFilter mag_filter, VkFilter min_filter, VkSamplerMipmapMode mip_filter);
void SetAddressMode(VkSamplerAddressMode u, VkSamplerAddressMode v, VkSamplerAddressMode w);
void SetBorderColor(VkBorderColor color);
void SetPointSampler(VkSamplerAddressMode address_mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE);
void SetLinearSampler(bool mipmaps, VkSamplerAddressMode address_mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE);
private:
VkSamplerCreateInfo m_ci;
};
class DescriptorSetUpdateBuilder
{
enum : u32
{
MAX_WRITES = 16,
MAX_INFOS = 16,
};
public:
DescriptorSetUpdateBuilder();
void Clear();
void Update(VkDevice device, bool clear = true);
void AddImageDescriptorWrite(VkDescriptorSet set, u32 binding, VkImageView view,
VkImageLayout layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
void AddSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkSampler sampler);
void AddCombinedImageSamplerDescriptorWrite(VkDescriptorSet set, u32 binding, VkImageView view, VkSampler sampler,
VkImageLayout layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
void AddBufferDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype, VkBuffer buffer, u32 offset,
u32 size);
void AddBufferViewDescriptorWrite(VkDescriptorSet set, u32 binding, VkDescriptorType dtype, VkBufferView view);
private:
union InfoUnion
{
VkDescriptorBufferInfo buffer;
VkDescriptorImageInfo image;
VkBufferView buffer_view;
};
std::array<VkWriteDescriptorSet, MAX_WRITES> m_writes;
u32 m_num_writes = 0;
std::array<InfoUnion, MAX_INFOS> m_infos;
u32 m_num_infos = 0;
};
class FramebufferBuilder
{
enum : u32
{
MAX_ATTACHMENTS = 2,
};
public:
FramebufferBuilder();
void Clear();
VkFramebuffer Create(VkDevice device, bool clear = true);
void AddAttachment(VkImageView image);
void SetSize(u32 width, u32 height, u32 layers);
void SetRenderPass(VkRenderPass render_pass);
private:
VkFramebufferCreateInfo m_ci;
std::array<VkImageView, MAX_ATTACHMENTS> m_images;
};
class RenderPassBuilder
{
enum : u32
{
MAX_ATTACHMENTS = 2,
MAX_ATTACHMENT_REFERENCES = 2,
MAX_SUBPASSES = 1,
};
public:
RenderPassBuilder();
void Clear();
VkRenderPass Create(VkDevice device, bool clear = true);
u32 AddAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load_op,
VkAttachmentStoreOp store_op, VkImageLayout initial_layout, VkImageLayout final_layout);
u32 AddSubpass();
void AddSubpassColorAttachment(u32 subpass, u32 attachment, VkImageLayout layout);
void AddSubpassDepthAttachment(u32 subpass, u32 attachment, VkImageLayout layout);
private:
VkRenderPassCreateInfo m_ci;
std::array<VkAttachmentDescription, MAX_ATTACHMENTS> m_attachments;
std::array<VkAttachmentReference, MAX_ATTACHMENT_REFERENCES> m_attachment_references;
u32 m_num_attachment_references = 0;
std::array<VkSubpassDescription, MAX_SUBPASSES> m_subpasses;
};
class BufferViewBuilder
{
public:
BufferViewBuilder();
void Clear();
VkBufferView Create(VkDevice device, bool clear = true);
void Set(VkBuffer buffer, VkFormat format, u32 offset, u32 size);
private:
VkBufferViewCreateInfo m_ci;
};
} // namespace Vulkan
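As a companion to the declarations above, a hedged sketch of how a simple pipeline might be assembled with GraphicsPipelineBuilder; the device, shader modules, pipeline layout, render pass and pipeline cache are assumed to have been created elsewhere and are not taken from the original sources.
// Sketch only: device, vs, fs, pipeline_layout, render_pass and pipeline_cache are assumptions.
Vulkan::GraphicsPipelineBuilder gpb; // the constructor's Clear() applies no-cull/no-depth/no-blend defaults
gpb.SetVertexShader(vs);
gpb.SetFragmentShader(fs);
gpb.AddVertexBuffer(0, sizeof(float) * 4);
gpb.AddVertexAttribute(0, 0, VK_FORMAT_R32G32B32A32_SFLOAT, 0);
gpb.SetPrimitiveTopology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP);
gpb.SetDynamicViewportAndScissorState(); // viewport/scissor will be set at draw time instead
gpb.SetPipelineLayout(pipeline_layout);
gpb.SetRenderPass(render_pass, 0);
const VkPipeline pipeline = gpb.Create(device, pipeline_cache);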

File diff suppressed because it is too large

View File

@ -1,298 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../types.h"
#include "loader.h"
#include "stream_buffer.h"
#include <array>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
struct WindowInfo;
namespace Vulkan {
class SwapChain;
class Context
{
public:
enum : u32
{
NUM_COMMAND_BUFFERS = 3
};
struct OptionalExtensions
{
bool vk_ext_memory_budget : 1;
bool vk_khr_driver_properties : 1;
};
~Context();
// Determines if the Vulkan validation layer is available on the system.
static bool CheckValidationLayerAvailablility();
// Helper method to create a Vulkan instance.
static VkInstance CreateVulkanInstance(const WindowInfo* wi, bool enable_debug_utils, bool enable_validation_layer);
// Returns a list of Vulkan-compatible GPUs.
using GPUList = std::vector<VkPhysicalDevice>;
using GPUNameList = std::vector<std::string>;
static GPUList EnumerateGPUs(VkInstance instance);
static GPUNameList EnumerateGPUNames(VkInstance instance);
// Creates a new context and sets it up as global.
static bool Create(std::string_view gpu_name, const WindowInfo* wi, std::unique_ptr<SwapChain>* out_swap_chain,
bool threaded_presentation, bool enable_debug_utils, bool enable_validation_layer, bool vsync);
// Destroys context.
static void Destroy();
// Enable/disable debug message runtime.
bool EnableDebugUtils();
void DisableDebugUtils();
// Global state accessors
ALWAYS_INLINE VkInstance GetVulkanInstance() const { return m_instance; }
ALWAYS_INLINE VkPhysicalDevice GetPhysicalDevice() const { return m_physical_device; }
ALWAYS_INLINE VkDevice GetDevice() const { return m_device; }
ALWAYS_INLINE VmaAllocator GetAllocator() const { return m_allocator; }
ALWAYS_INLINE VkQueue GetGraphicsQueue() const { return m_graphics_queue; }
ALWAYS_INLINE u32 GetGraphicsQueueFamilyIndex() const { return m_graphics_queue_family_index; }
ALWAYS_INLINE VkQueue GetPresentQueue() const { return m_present_queue; }
ALWAYS_INLINE u32 GetPresentQueueFamilyIndex() const { return m_present_queue_family_index; }
ALWAYS_INLINE const VkQueueFamilyProperties& GetGraphicsQueueProperties() const
{
return m_graphics_queue_properties;
}
ALWAYS_INLINE const VkPhysicalDeviceMemoryProperties& GetDeviceMemoryProperties() const
{
return m_device_memory_properties;
}
ALWAYS_INLINE const VkPhysicalDeviceProperties& GetDeviceProperties() const { return m_device_properties; }
ALWAYS_INLINE const VkPhysicalDeviceFeatures& GetDeviceFeatures() const { return m_device_features; }
ALWAYS_INLINE const VkPhysicalDeviceLimits& GetDeviceLimits() const { return m_device_properties.limits; }
ALWAYS_INLINE const VkPhysicalDeviceDriverProperties& GetDeviceDriverProperties() const
{
return m_device_driver_properties;
}
// Support bits
ALWAYS_INLINE bool SupportsGeometryShaders() const { return m_device_features.geometryShader == VK_TRUE; }
ALWAYS_INLINE bool SupportsDualSourceBlend() const { return m_device_features.dualSrcBlend == VK_TRUE; }
// Helpers for getting constants
ALWAYS_INLINE u32 GetUniformBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minUniformBufferOffsetAlignment);
}
ALWAYS_INLINE u32 GetTexelBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minTexelBufferOffsetAlignment);
}
ALWAYS_INLINE u32 GetStorageBufferAlignment() const
{
return static_cast<u32>(m_device_properties.limits.minStorageBufferOffsetAlignment);
}
ALWAYS_INLINE u32 GetBufferImageGranularity() const
{
return static_cast<u32>(m_device_properties.limits.bufferImageGranularity);
}
ALWAYS_INLINE u32 GetBufferCopyOffsetAlignment() const
{
return static_cast<u32>(m_device_properties.limits.optimalBufferCopyOffsetAlignment);
}
ALWAYS_INLINE u32 GetBufferCopyRowPitchAlignment() const
{
return static_cast<u32>(m_device_properties.limits.optimalBufferCopyRowPitchAlignment);
}
ALWAYS_INLINE u32 GetMaxImageDimension2D() const { return m_device_properties.limits.maxImageDimension2D; }
// Creates a simple render pass.
VkRenderPass GetRenderPass(VkFormat color_format, VkFormat depth_format, VkSampleCountFlagBits samples,
VkAttachmentLoadOp load_op);
// These command buffers are allocated per-frame. They are valid until the command buffer
// is submitted; after that, you should call these functions again.
ALWAYS_INLINE VkDescriptorPool GetGlobalDescriptorPool() const { return m_global_descriptor_pool; }
ALWAYS_INLINE VkCommandBuffer GetCurrentCommandBuffer() const { return m_current_command_buffer; }
ALWAYS_INLINE StreamBuffer& GetTextureUploadBuffer() { return m_texture_upload_buffer; }
ALWAYS_INLINE VkDescriptorPool GetCurrentDescriptorPool() const
{
return m_frame_resources[m_current_frame].descriptor_pool;
}
/// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocateDescriptorSet(VkDescriptorSetLayout set_layout);
/// Allocates a descriptor set from the pool reserved for the current frame.
VkDescriptorSet AllocateGlobalDescriptorSet(VkDescriptorSetLayout set_layout);
/// Frees a descriptor set allocated from the global pool.
void FreeGlobalDescriptorSet(VkDescriptorSet set);
// Gets the fence that will be signaled when the currently executing command buffer is
// queued and executed. Do not wait for this fence before the buffer is executed.
ALWAYS_INLINE VkFence GetCurrentCommandBufferFence() const { return m_frame_resources[m_current_frame].fence; }
// Fence "counters" are used to track which commands have been completed by the GPU.
// If the last completed fence counter is greater or equal to N, it means that the work
// associated counter N has been completed by the GPU. The value of N to associate with
// commands can be retreived by calling GetCurrentFenceCounter().
u64 GetCompletedFenceCounter() const { return m_completed_fence_counter; }
// Gets the fence counter that will be associated with work in the current command buffer.
// Once GetCompletedFenceCounter() reaches this value, that work has finished on the GPU.
u64 GetCurrentFenceCounter() const { return m_frame_resources[m_current_frame].fence_counter; }
void SubmitCommandBuffer(VkSemaphore wait_semaphore = VK_NULL_HANDLE, VkSemaphore signal_semaphore = VK_NULL_HANDLE,
VkSwapchainKHR present_swap_chain = VK_NULL_HANDLE,
uint32_t present_image_index = 0xFFFFFFFF, bool submit_on_thread = false);
void MoveToNextCommandBuffer();
void ExecuteCommandBuffer(bool wait_for_completion);
void WaitForPresentComplete();
// Was the last present submitted to the queue a failure? If so, we must recreate our swapchain.
bool CheckLastPresentFail();
// Schedule a Vulkan resource for destruction later on. This will occur when the command buffer
// is next re-used and the GPU has finished working with the specified resource.
void DeferBufferDestruction(VkBuffer object);
void DeferBufferDestruction(VkBuffer object, VmaAllocation allocation);
void DeferBufferViewDestruction(VkBufferView object);
void DeferDeviceMemoryDestruction(VkDeviceMemory object);
void DeferFramebufferDestruction(VkFramebuffer object);
void DeferImageDestruction(VkImage object);
void DeferImageDestruction(VkImage object, VmaAllocation allocation);
void DeferImageViewDestruction(VkImageView object);
void DeferPipelineDestruction(VkPipeline pipeline);
// Wait for a fence to be completed.
// Also invokes callbacks for completion.
void WaitForFenceCounter(u64 fence_counter);
void WaitForGPUIdle();
float GetAndResetAccumulatedGPUTime();
bool SetEnableGPUTiming(bool enabled);
private:
Context(VkInstance instance, VkPhysicalDevice physical_device, bool owns_device);
using ExtensionList = std::vector<const char*>;
static bool SelectInstanceExtensions(ExtensionList* extension_list, const WindowInfo* wi, bool enable_debug_utils);
bool SelectDeviceExtensions(ExtensionList* extension_list, bool enable_surface);
bool SelectDeviceFeatures(const VkPhysicalDeviceFeatures* required_features);
bool CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer, const char** required_device_extensions,
u32 num_required_device_extensions, const char** required_device_layers,
u32 num_required_device_layers, const VkPhysicalDeviceFeatures* required_features);
void ProcessDeviceExtensions();
bool CreateAllocator();
void DestroyAllocator();
bool CreateCommandBuffers();
void DestroyCommandBuffers();
bool CreateGlobalDescriptorPool();
void DestroyGlobalDescriptorPool();
bool CreateQueryPool();
void DestroyQueryPool();
bool CreateTextureStreamBuffer();
void DestroyRenderPassCache();
void ActivateCommandBuffer(u32 index);
void WaitForCommandBufferCompletion(u32 index);
void DoSubmitCommandBuffer(u32 index, VkSemaphore wait_semaphore, VkSemaphore signal_semaphore);
void DoPresent(VkSemaphore wait_semaphore, VkSwapchainKHR present_swap_chain, uint32_t present_image_index);
void WaitForPresentComplete(std::unique_lock<std::mutex>& lock);
void PresentThread();
void StartPresentThread();
void StopPresentThread();
struct FrameResources
{
// Per-frame command pool/buffer, descriptor pool and fence.
VkCommandPool command_pool = VK_NULL_HANDLE;
VkCommandBuffer command_buffer = VK_NULL_HANDLE;
VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
VkFence fence = VK_NULL_HANDLE;
u64 fence_counter = 0;
bool needs_fence_wait = false;
bool timestamp_written = false;
std::vector<std::function<void()>> cleanup_resources;
};
VkInstance m_instance = VK_NULL_HANDLE;
VkPhysicalDevice m_physical_device = VK_NULL_HANDLE;
VkDevice m_device = VK_NULL_HANDLE;
VmaAllocator m_allocator = VK_NULL_HANDLE;
VkCommandBuffer m_current_command_buffer = VK_NULL_HANDLE;
VkDescriptorPool m_global_descriptor_pool = VK_NULL_HANDLE;
VkQueue m_graphics_queue = VK_NULL_HANDLE;
u32 m_graphics_queue_family_index = 0;
VkQueue m_present_queue = VK_NULL_HANDLE;
u32 m_present_queue_family_index = 0;
VkQueryPool m_timestamp_query_pool = VK_NULL_HANDLE;
float m_accumulated_gpu_time = 0.0f;
bool m_gpu_timing_enabled = false;
bool m_gpu_timing_supported = false;
std::array<FrameResources, NUM_COMMAND_BUFFERS> m_frame_resources;
u64 m_next_fence_counter = 1;
u64 m_completed_fence_counter = 0;
u32 m_current_frame;
StreamBuffer m_texture_upload_buffer;
std::atomic_bool m_last_present_failed{false};
std::atomic_bool m_present_done{true};
std::mutex m_present_mutex;
std::condition_variable m_present_queued_cv;
std::condition_variable m_present_done_cv;
std::thread m_present_thread;
std::atomic_bool m_present_thread_done{false};
struct QueuedPresent
{
VkSemaphore wait_semaphore;
VkSemaphore signal_semaphore;
VkSwapchainKHR present_swap_chain;
u32 command_buffer_index;
u32 present_image_index;
};
QueuedPresent m_queued_present = {};
// Render pass cache
using RenderPassCacheKey = std::tuple<VkFormat, VkFormat, VkSampleCountFlagBits, VkAttachmentLoadOp>;
std::map<RenderPassCacheKey, VkRenderPass> m_render_pass_cache;
VkDebugUtilsMessengerEXT m_debug_messenger_callback = VK_NULL_HANDLE;
VkQueueFamilyProperties m_graphics_queue_properties = {};
VkPhysicalDeviceFeatures m_device_features = {};
VkPhysicalDeviceProperties m_device_properties = {};
VkPhysicalDeviceMemoryProperties m_device_memory_properties = {};
VkPhysicalDeviceDriverPropertiesKHR m_device_driver_properties = {};
OptionalExtensions m_optional_extensions = {};
};
} // namespace Vulkan
extern std::unique_ptr<Vulkan::Context> g_vulkan_context;
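The fence-counter comments in the class above describe the intended lifetime model; the following is a minimal sketch of that pattern, assuming the context has been created and that buffer/allocation refer to a live VMA allocation (both names are illustrative).
// Sketch only: buffer and allocation are assumptions; error handling omitted.
const u64 fence_counter = g_vulkan_context->GetCurrentFenceCounter();
g_vulkan_context->DeferBufferDestruction(buffer, allocation); // freed once the GPU finishes this work
g_vulkan_context->SubmitCommandBuffer();
g_vulkan_context->MoveToNextCommandBuffer();
// Later, before reusing CPU-side storage the GPU may still be reading:
if (g_vulkan_context->GetCompletedFenceCounter() < fence_counter)
  g_vulkan_context->WaitForFenceCounter(fence_counter);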

View File

@ -1,225 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
// We abuse the preprocessor here to only need to specify function names once.
// Function names are prefixed so as not to conflict with system symbols at runtime. A sketch
// of how the list expands follows this listing.
#define VULKAN_MODULE_ENTRY_POINT(name, required) extern PFN_##name ds_##name;
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) extern PFN_##name ds_##name;
#define VULKAN_DEVICE_ENTRY_POINT(name, required) extern PFN_##name ds_##name;
#define VULKAN_DEFINE_NAME_PREFIX ds_
#include "entry_points.inl"
#undef VULKAN_DEFINE_NAME_PREFIX
#undef VULKAN_DEVICE_ENTRY_POINT
#undef VULKAN_INSTANCE_ENTRY_POINT
#undef VULKAN_MODULE_ENTRY_POINT
#ifdef __cplusplus
}
#endif
#define vkCreateInstance ds_vkCreateInstance
#define vkGetInstanceProcAddr ds_vkGetInstanceProcAddr
#define vkEnumerateInstanceExtensionProperties ds_vkEnumerateInstanceExtensionProperties
#define vkEnumerateInstanceLayerProperties ds_vkEnumerateInstanceLayerProperties
#define vkEnumerateInstanceVersion ds_vkEnumerateInstanceVersion
#define vkGetDeviceProcAddr ds_vkGetDeviceProcAddr
#define vkDestroyInstance ds_vkDestroyInstance
#define vkEnumeratePhysicalDevices ds_vkEnumeratePhysicalDevices
#define vkGetPhysicalDeviceFeatures ds_vkGetPhysicalDeviceFeatures
#define vkGetPhysicalDeviceFormatProperties ds_vkGetPhysicalDeviceFormatProperties
#define vkGetPhysicalDeviceImageFormatProperties ds_vkGetPhysicalDeviceImageFormatProperties
#define vkGetPhysicalDeviceProperties ds_vkGetPhysicalDeviceProperties
#define vkGetPhysicalDeviceQueueFamilyProperties ds_vkGetPhysicalDeviceQueueFamilyProperties
#define vkGetPhysicalDeviceMemoryProperties ds_vkGetPhysicalDeviceMemoryProperties
#define vkCreateDevice ds_vkCreateDevice
#define vkEnumerateDeviceExtensionProperties ds_vkEnumerateDeviceExtensionProperties
#define vkEnumerateDeviceLayerProperties ds_vkEnumerateDeviceLayerProperties
#define vkGetPhysicalDeviceSparseImageFormatProperties ds_vkGetPhysicalDeviceSparseImageFormatProperties
#define vkDestroySurfaceKHR ds_vkDestroySurfaceKHR
#define vkGetPhysicalDeviceSurfaceSupportKHR ds_vkGetPhysicalDeviceSurfaceSupportKHR
#define vkGetPhysicalDeviceSurfaceCapabilitiesKHR ds_vkGetPhysicalDeviceSurfaceCapabilitiesKHR
#define vkGetPhysicalDeviceSurfaceFormatsKHR ds_vkGetPhysicalDeviceSurfaceFormatsKHR
#define vkGetPhysicalDeviceSurfacePresentModesKHR ds_vkGetPhysicalDeviceSurfacePresentModesKHR
#define vkCreateWin32SurfaceKHR ds_vkCreateWin32SurfaceKHR
#define vkGetPhysicalDeviceWin32PresentationSupportKHR ds_vkGetPhysicalDeviceWin32PresentationSupportKHR
#define vkCreateXlibSurfaceKHR ds_vkCreateXlibSurfaceKHR
#define vkGetPhysicalDeviceXlibPresentationSupportKHR ds_vkGetPhysicalDeviceXlibPresentationSupportKHR
#define vkCreateWaylandSurfaceKHR ds_vkCreateWaylandSurfaceKHR
#define vkCreateAndroidSurfaceKHR ds_vkCreateAndroidSurfaceKHR
#define vkCreateMacOSSurfaceMVK ds_vkCreateMacOSSurfaceMVK
#define vkCreateMetalSurfaceEXT ds_vkCreateMetalSurfaceEXT
// VK_EXT_debug_utils
#define vkCmdBeginDebugUtilsLabelEXT ds_vkCmdBeginDebugUtilsLabelEXT
#define vkCmdEndDebugUtilsLabelEXT ds_vkCmdEndDebugUtilsLabelEXT
#define vkCmdInsertDebugUtilsLabelEXT ds_vkCmdInsertDebugUtilsLabelEXT
#define vkCreateDebugUtilsMessengerEXT ds_vkCreateDebugUtilsMessengerEXT
#define vkDestroyDebugUtilsMessengerEXT ds_vkDestroyDebugUtilsMessengerEXT
#define vkQueueBeginDebugUtilsLabelEXT ds_vkQueueBeginDebugUtilsLabelEXT
#define vkQueueEndDebugUtilsLabelEXT ds_vkQueueEndDebugUtilsLabelEXT
#define vkQueueInsertDebugUtilsLabelEXT ds_vkQueueInsertDebugUtilsLabelEXT
#define vkSetDebugUtilsObjectNameEXT ds_vkSetDebugUtilsObjectNameEXT
#define vkSetDebugUtilsObjectTagEXT ds_vkSetDebugUtilsObjectTagEXT
#define vkSubmitDebugUtilsMessageEXT ds_vkSubmitDebugUtilsMessageEXT
#define vkGetPhysicalDeviceSurfaceCapabilities2KHR ds_vkGetPhysicalDeviceSurfaceCapabilities2KHR
#define vkGetPhysicalDeviceDisplayPropertiesKHR ds_vkGetPhysicalDeviceDisplayPropertiesKHR
#define vkGetPhysicalDeviceDisplayPlanePropertiesKHR ds_vkGetPhysicalDeviceDisplayPlanePropertiesKHR
#define vkGetDisplayPlaneSupportedDisplaysKHR ds_vkGetDisplayPlaneSupportedDisplaysKHR
#define vkGetDisplayModePropertiesKHR ds_vkGetDisplayModePropertiesKHR
#define vkCreateDisplayModeKHR ds_vkCreateDisplayModeKHR
#define vkGetDisplayPlaneCapabilitiesKHR ds_vkGetDisplayPlaneCapabilitiesKHR
#define vkCreateDisplayPlaneSurfaceKHR ds_vkCreateDisplayPlaneSurfaceKHR
// Vulkan 1.1 functions.
#define vkGetPhysicalDeviceFeatures2 ds_vkGetPhysicalDeviceFeatures2
#define vkGetPhysicalDeviceProperties2 ds_vkGetPhysicalDeviceProperties2
#define vkGetPhysicalDeviceMemoryProperties2 ds_vkGetPhysicalDeviceMemoryProperties2
#define vkDestroyDevice ds_vkDestroyDevice
#define vkGetDeviceQueue ds_vkGetDeviceQueue
#define vkQueueSubmit ds_vkQueueSubmit
#define vkQueueWaitIdle ds_vkQueueWaitIdle
#define vkDeviceWaitIdle ds_vkDeviceWaitIdle
#define vkAllocateMemory ds_vkAllocateMemory
#define vkFreeMemory ds_vkFreeMemory
#define vkMapMemory ds_vkMapMemory
#define vkUnmapMemory ds_vkUnmapMemory
#define vkFlushMappedMemoryRanges ds_vkFlushMappedMemoryRanges
#define vkInvalidateMappedMemoryRanges ds_vkInvalidateMappedMemoryRanges
#define vkGetDeviceMemoryCommitment ds_vkGetDeviceMemoryCommitment
#define vkBindBufferMemory ds_vkBindBufferMemory
#define vkBindImageMemory ds_vkBindImageMemory
#define vkGetBufferMemoryRequirements ds_vkGetBufferMemoryRequirements
#define vkGetImageMemoryRequirements ds_vkGetImageMemoryRequirements
#define vkGetImageSparseMemoryRequirements ds_vkGetImageSparseMemoryRequirements
#define vkQueueBindSparse ds_vkQueueBindSparse
#define vkCreateFence ds_vkCreateFence
#define vkDestroyFence ds_vkDestroyFence
#define vkResetFences ds_vkResetFences
#define vkGetFenceStatus ds_vkGetFenceStatus
#define vkWaitForFences ds_vkWaitForFences
#define vkCreateSemaphore ds_vkCreateSemaphore
#define vkDestroySemaphore ds_vkDestroySemaphore
#define vkCreateEvent ds_vkCreateEvent
#define vkDestroyEvent ds_vkDestroyEvent
#define vkGetEventStatus ds_vkGetEventStatus
#define vkSetEvent ds_vkSetEvent
#define vkResetEvent ds_vkResetEvent
#define vkCreateQueryPool ds_vkCreateQueryPool
#define vkDestroyQueryPool ds_vkDestroyQueryPool
#define vkGetQueryPoolResults ds_vkGetQueryPoolResults
#define vkCreateBuffer ds_vkCreateBuffer
#define vkDestroyBuffer ds_vkDestroyBuffer
#define vkCreateBufferView ds_vkCreateBufferView
#define vkDestroyBufferView ds_vkDestroyBufferView
#define vkCreateImage ds_vkCreateImage
#define vkDestroyImage ds_vkDestroyImage
#define vkGetImageSubresourceLayout ds_vkGetImageSubresourceLayout
#define vkCreateImageView ds_vkCreateImageView
#define vkDestroyImageView ds_vkDestroyImageView
#define vkCreateShaderModule ds_vkCreateShaderModule
#define vkDestroyShaderModule ds_vkDestroyShaderModule
#define vkCreatePipelineCache ds_vkCreatePipelineCache
#define vkDestroyPipelineCache ds_vkDestroyPipelineCache
#define vkGetPipelineCacheData ds_vkGetPipelineCacheData
#define vkMergePipelineCaches ds_vkMergePipelineCaches
#define vkCreateGraphicsPipelines ds_vkCreateGraphicsPipelines
#define vkCreateComputePipelines ds_vkCreateComputePipelines
#define vkDestroyPipeline ds_vkDestroyPipeline
#define vkCreatePipelineLayout ds_vkCreatePipelineLayout
#define vkDestroyPipelineLayout ds_vkDestroyPipelineLayout
#define vkCreateSampler ds_vkCreateSampler
#define vkDestroySampler ds_vkDestroySampler
#define vkCreateDescriptorSetLayout ds_vkCreateDescriptorSetLayout
#define vkDestroyDescriptorSetLayout ds_vkDestroyDescriptorSetLayout
#define vkCreateDescriptorPool ds_vkCreateDescriptorPool
#define vkDestroyDescriptorPool ds_vkDestroyDescriptorPool
#define vkResetDescriptorPool ds_vkResetDescriptorPool
#define vkAllocateDescriptorSets ds_vkAllocateDescriptorSets
#define vkFreeDescriptorSets ds_vkFreeDescriptorSets
#define vkUpdateDescriptorSets ds_vkUpdateDescriptorSets
#define vkCreateFramebuffer ds_vkCreateFramebuffer
#define vkDestroyFramebuffer ds_vkDestroyFramebuffer
#define vkCreateRenderPass ds_vkCreateRenderPass
#define vkDestroyRenderPass ds_vkDestroyRenderPass
#define vkGetRenderAreaGranularity ds_vkGetRenderAreaGranularity
#define vkCreateCommandPool ds_vkCreateCommandPool
#define vkDestroyCommandPool ds_vkDestroyCommandPool
#define vkResetCommandPool ds_vkResetCommandPool
#define vkAllocateCommandBuffers ds_vkAllocateCommandBuffers
#define vkFreeCommandBuffers ds_vkFreeCommandBuffers
#define vkBeginCommandBuffer ds_vkBeginCommandBuffer
#define vkEndCommandBuffer ds_vkEndCommandBuffer
#define vkResetCommandBuffer ds_vkResetCommandBuffer
#define vkCmdBindPipeline ds_vkCmdBindPipeline
#define vkCmdSetViewport ds_vkCmdSetViewport
#define vkCmdSetScissor ds_vkCmdSetScissor
#define vkCmdSetLineWidth ds_vkCmdSetLineWidth
#define vkCmdSetDepthBias ds_vkCmdSetDepthBias
#define vkCmdSetBlendConstants ds_vkCmdSetBlendConstants
#define vkCmdSetDepthBounds ds_vkCmdSetDepthBounds
#define vkCmdSetStencilCompareMask ds_vkCmdSetStencilCompareMask
#define vkCmdSetStencilWriteMask ds_vkCmdSetStencilWriteMask
#define vkCmdSetStencilReference ds_vkCmdSetStencilReference
#define vkCmdBindDescriptorSets ds_vkCmdBindDescriptorSets
#define vkCmdBindIndexBuffer ds_vkCmdBindIndexBuffer
#define vkCmdBindVertexBuffers ds_vkCmdBindVertexBuffers
#define vkCmdDraw ds_vkCmdDraw
#define vkCmdDrawIndexed ds_vkCmdDrawIndexed
#define vkCmdDrawIndirect ds_vkCmdDrawIndirect
#define vkCmdDrawIndexedIndirect ds_vkCmdDrawIndexedIndirect
#define vkCmdDispatch ds_vkCmdDispatch
#define vkCmdDispatchIndirect ds_vkCmdDispatchIndirect
#define vkCmdCopyBuffer ds_vkCmdCopyBuffer
#define vkCmdCopyImage ds_vkCmdCopyImage
#define vkCmdBlitImage ds_vkCmdBlitImage
#define vkCmdCopyBufferToImage ds_vkCmdCopyBufferToImage
#define vkCmdCopyImageToBuffer ds_vkCmdCopyImageToBuffer
#define vkCmdUpdateBuffer ds_vkCmdUpdateBuffer
#define vkCmdFillBuffer ds_vkCmdFillBuffer
#define vkCmdClearColorImage ds_vkCmdClearColorImage
#define vkCmdClearDepthStencilImage ds_vkCmdClearDepthStencilImage
#define vkCmdClearAttachments ds_vkCmdClearAttachments
#define vkCmdResolveImage ds_vkCmdResolveImage
#define vkCmdSetEvent ds_vkCmdSetEvent
#define vkCmdResetEvent ds_vkCmdResetEvent
#define vkCmdWaitEvents ds_vkCmdWaitEvents
#define vkCmdPipelineBarrier ds_vkCmdPipelineBarrier
#define vkCmdBeginQuery ds_vkCmdBeginQuery
#define vkCmdEndQuery ds_vkCmdEndQuery
#define vkCmdResetQueryPool ds_vkCmdResetQueryPool
#define vkCmdWriteTimestamp ds_vkCmdWriteTimestamp
#define vkCmdCopyQueryPoolResults ds_vkCmdCopyQueryPoolResults
#define vkCmdPushConstants ds_vkCmdPushConstants
#define vkCmdBeginRenderPass ds_vkCmdBeginRenderPass
#define vkCmdNextSubpass ds_vkCmdNextSubpass
#define vkCmdEndRenderPass ds_vkCmdEndRenderPass
#define vkCmdExecuteCommands ds_vkCmdExecuteCommands
#define vkCreateSwapchainKHR ds_vkCreateSwapchainKHR
#define vkDestroySwapchainKHR ds_vkDestroySwapchainKHR
#define vkGetSwapchainImagesKHR ds_vkGetSwapchainImagesKHR
#define vkAcquireNextImageKHR ds_vkAcquireNextImageKHR
#define vkQueuePresentKHR ds_vkQueuePresentKHR
// Vulkan 1.1 functions.
#define vkGetBufferMemoryRequirements2 ds_vkGetBufferMemoryRequirements2
#define vkGetImageMemoryRequirements2 ds_vkGetImageMemoryRequirements2
#define vkBindBufferMemory2 ds_vkBindBufferMemory2
#define vkBindImageMemory2 ds_vkBindImageMemory2
#ifdef SUPPORTS_VULKAN_EXCLUSIVE_FULLSCREEN
#define vkAcquireFullScreenExclusiveModeEXT ds_vkAcquireFullScreenExclusiveModeEXT
#define vkReleaseFullScreenExclusiveModeEXT ds_vkReleaseFullScreenExclusiveModeEXT
#endif
// Vulkan 1.3 functions.
#define vkGetDeviceBufferMemoryRequirements ds_vkGetDeviceBufferMemoryRequirements
#define vkGetDeviceImageMemoryRequirements ds_vkGetDeviceImageMemoryRequirements
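To make the macro scheme above concrete, here is a hedged illustration of the usual X-macro pattern it implies. The contents of entry_points.inl and the loader-side definitions are not part of this listing, so the entries and loading code below are assumptions rather than the project's actual code.
// Hypothetical entry_points.inl entries (illustrative only):
//   VULKAN_MODULE_ENTRY_POINT(vkCreateInstance, true)
//   VULKAN_INSTANCE_ENTRY_POINT(vkDestroySurfaceKHR, false)
//   VULKAN_DEVICE_ENTRY_POINT(vkQueueSubmit, true)
// A loader translation unit would re-include the list to define the prefixed pointers...
#define VULKAN_MODULE_ENTRY_POINT(name, required) PFN_##name ds_##name;
#define VULKAN_INSTANCE_ENTRY_POINT(name, required) PFN_##name ds_##name;
#define VULKAN_DEVICE_ENTRY_POINT(name, required) PFN_##name ds_##name;
#include "entry_points.inl"
#undef VULKAN_DEVICE_ENTRY_POINT
#undef VULKAN_INSTANCE_ENTRY_POINT
#undef VULKAN_MODULE_ENTRY_POINT
// ...and again to resolve them, e.g. for device-level functions:
#define VULKAN_DEVICE_ENTRY_POINT(name, required) \
  ds_##name = reinterpret_cast<PFN_##name>(vkGetDeviceProcAddr(device, #name));
// (followed by another #include "entry_points.inl", failure checks for `required` entries, and #undef)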

View File

@ -1,525 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "shader_cache.h"
#include "../assert.h"
#include "../file_system.h"
#include "../log.h"
#include "../md5_digest.h"
#include "context.h"
#include "shader_compiler.h"
#include "util.h"
Log_SetChannel(Vulkan::ShaderCache);
// TODO: store the driver version and stuff in the shader header
std::unique_ptr<Vulkan::ShaderCache> g_vulkan_shader_cache;
namespace Vulkan {
using ShaderCompiler::SPIRVCodeType;
using ShaderCompiler::SPIRVCodeVector;
#pragma pack(push, 4)
struct VK_PIPELINE_CACHE_HEADER
{
u32 header_length;
u32 header_version;
u32 vendor_id;
u32 device_id;
u8 uuid[VK_UUID_SIZE];
};
struct CacheIndexEntry
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
u32 shader_type;
u32 file_offset;
u32 blob_size;
};
#pragma pack(pop)
static bool ValidatePipelineCacheHeader(const VK_PIPELINE_CACHE_HEADER& header)
{
if (header.header_length < sizeof(VK_PIPELINE_CACHE_HEADER))
{
Log_ErrorPrintf("Pipeline cache failed validation: Invalid header length");
return false;
}
if (header.header_version != VK_PIPELINE_CACHE_HEADER_VERSION_ONE)
{
Log_ErrorPrintf("Pipeline cache failed validation: Invalid header version");
return false;
}
if (header.vendor_id != g_vulkan_context->GetDeviceProperties().vendorID)
{
Log_ErrorPrintf("Pipeline cache failed validation: Incorrect vendor ID (file: 0x%X, device: 0x%X)",
header.vendor_id, g_vulkan_context->GetDeviceProperties().vendorID);
return false;
}
if (header.device_id != g_vulkan_context->GetDeviceProperties().deviceID)
{
Log_ErrorPrintf("Pipeline cache failed validation: Incorrect device ID (file: 0x%X, device: 0x%X)",
header.device_id, g_vulkan_context->GetDeviceProperties().deviceID);
return false;
}
if (std::memcmp(header.uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE) != 0)
{
Log_ErrorPrintf("Pipeline cache failed validation: Incorrect UUID");
return false;
}
return true;
}
static void FillPipelineCacheHeader(VK_PIPELINE_CACHE_HEADER* header)
{
header->header_length = sizeof(VK_PIPELINE_CACHE_HEADER);
header->header_version = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
header->vendor_id = g_vulkan_context->GetDeviceProperties().vendorID;
header->device_id = g_vulkan_context->GetDeviceProperties().deviceID;
std::memcpy(header->uuid, g_vulkan_context->GetDeviceProperties().pipelineCacheUUID, VK_UUID_SIZE);
}
ShaderCache::ShaderCache() = default;
ShaderCache::~ShaderCache()
{
CloseShaderCache();
FlushPipelineCache();
ClosePipelineCache();
}
bool ShaderCache::CacheIndexKey::operator==(const CacheIndexKey& key) const
{
return (source_hash_low == key.source_hash_low && source_hash_high == key.source_hash_high &&
source_length == key.source_length && shader_type == key.shader_type);
}
bool ShaderCache::CacheIndexKey::operator!=(const CacheIndexKey& key) const
{
return (source_hash_low != key.source_hash_low || source_hash_high != key.source_hash_high ||
source_length != key.source_length || shader_type != key.shader_type);
}
void ShaderCache::Create(std::string_view base_path, u32 version, bool debug)
{
Assert(!g_vulkan_shader_cache);
g_vulkan_shader_cache.reset(new ShaderCache());
g_vulkan_shader_cache->Open(base_path, version, debug);
}
void ShaderCache::Destroy()
{
g_vulkan_shader_cache.reset();
}
void ShaderCache::Open(std::string_view base_path, u32 version, bool debug)
{
m_version = version;
m_debug = debug;
if (!base_path.empty())
{
m_pipeline_cache_filename = GetPipelineCacheBaseFileName(base_path, debug);
const std::string base_filename = GetShaderCacheBaseFileName(base_path, debug);
const std::string index_filename = base_filename + ".idx";
const std::string blob_filename = base_filename + ".bin";
if (!ReadExistingShaderCache(index_filename, blob_filename))
CreateNewShaderCache(index_filename, blob_filename);
if (!ReadExistingPipelineCache())
CreateNewPipelineCache();
}
else
{
CreateNewPipelineCache();
}
}
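For orientation, a hedged usage sketch of the cache through the g_vulkan_shader_cache global; the cache directory, version value, shader-type enumerator and source string below are illustrative assumptions, not values from the original sources.
// Sketch only: the concrete ShaderCompiler::Type enumerators are not shown in this listing.
Vulkan::ShaderCache::Create("/path/to/cache_dir", /*version=*/1, /*debug=*/false);
const auto spv = g_vulkan_shader_cache->GetShaderSPV(Vulkan::ShaderCompiler::Type::Fragment, fragment_source);
if (spv.has_value())
{
  // ... create a VkShaderModule from spv->data() / spv->size() ...
}
Vulkan::ShaderCache::Destroy();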
VkPipelineCache ShaderCache::GetPipelineCache(bool set_dirty /*= true*/)
{
if (m_pipeline_cache == VK_NULL_HANDLE)
return VK_NULL_HANDLE;
m_pipeline_cache_dirty |= set_dirty;
return m_pipeline_cache;
}
bool ShaderCache::CreateNewShaderCache(const std::string& index_filename, const std::string& blob_filename)
{
if (FileSystem::FileExists(index_filename.c_str()))
{
Log_WarningPrintf("Removing existing index file '%s'", index_filename.c_str());
FileSystem::DeleteFile(index_filename.c_str());
}
if (FileSystem::FileExists(blob_filename.c_str()))
{
Log_WarningPrintf("Removing existing blob file '%s'", blob_filename.c_str());
FileSystem::DeleteFile(blob_filename.c_str());
}
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "wb");
if (!m_index_file)
{
Log_ErrorPrintf("Failed to open index file '%s' for writing", index_filename.c_str());
return false;
}
const u32 index_version = FILE_VERSION;
VK_PIPELINE_CACHE_HEADER header;
FillPipelineCacheHeader(&header);
if (std::fwrite(&index_version, sizeof(index_version), 1, m_index_file) != 1 ||
std::fwrite(&m_version, sizeof(m_version), 1, m_index_file) != 1 ||
std::fwrite(&header, sizeof(header), 1, m_index_file) != 1)
{
Log_ErrorPrintf("Failed to write header to index file '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFile(index_filename.c_str());
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "w+b");
if (!m_blob_file)
{
Log_ErrorPrintf("Failed to open blob file '%s' for writing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
FileSystem::DeleteFile(index_filename.c_str());
return false;
}
return true;
}
bool ShaderCache::ReadExistingShaderCache(const std::string& index_filename, const std::string& blob_filename)
{
m_index_file = FileSystem::OpenCFile(index_filename.c_str(), "r+b");
if (!m_index_file)
return false;
u32 file_version = 0;
u32 data_version = 0;
if (std::fread(&file_version, sizeof(file_version), 1, m_index_file) != 1 || file_version != FILE_VERSION ||
std::fread(&data_version, sizeof(data_version), 1, m_index_file) != 1 || data_version != m_version)
{
Log_ErrorPrintf("Bad file/data version in '%s'", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
VK_PIPELINE_CACHE_HEADER header;
if (std::fread(&header, sizeof(header), 1, m_index_file) != 1 || !ValidatePipelineCacheHeader(header))
{
Log_ErrorPrintf("Mismatched pipeline cache header in '%s' (GPU/driver changed?)", index_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
m_blob_file = FileSystem::OpenCFile(blob_filename.c_str(), "a+b");
if (!m_blob_file)
{
Log_ErrorPrintf("Blob file '%s' is missing", blob_filename.c_str());
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
std::fseek(m_blob_file, 0, SEEK_END);
const u32 blob_file_size = static_cast<u32>(std::ftell(m_blob_file));
for (;;)
{
CacheIndexEntry entry;
if (std::fread(&entry, sizeof(entry), 1, m_index_file) != 1 ||
(entry.file_offset + entry.blob_size) > blob_file_size)
{
if (std::feof(m_index_file))
break;
Log_ErrorPrintf("Failed to read entry from '%s', corrupt file?", index_filename.c_str());
m_index.clear();
std::fclose(m_blob_file);
m_blob_file = nullptr;
std::fclose(m_index_file);
m_index_file = nullptr;
return false;
}
const CacheIndexKey key{entry.source_hash_low, entry.source_hash_high, entry.source_length,
static_cast<ShaderCompiler::Type>(entry.shader_type)};
const CacheIndexData data{entry.file_offset, entry.blob_size};
m_index.emplace(key, data);
}
// ensure we don't write before seeking
std::fseek(m_index_file, 0, SEEK_END);
Log_InfoPrintf("Read %zu entries from '%s'", m_index.size(), index_filename.c_str());
return true;
}
void ShaderCache::CloseShaderCache()
{
if (m_index_file)
{
std::fclose(m_index_file);
m_index_file = nullptr;
}
if (m_blob_file)
{
std::fclose(m_blob_file);
m_blob_file = nullptr;
}
}
bool ShaderCache::CreateNewPipelineCache()
{
if (!m_pipeline_cache_filename.empty() && FileSystem::FileExists(m_pipeline_cache_filename.c_str()))
{
Log_WarningPrintf("Removing existing pipeline cache '%s'", m_pipeline_cache_filename.c_str());
FileSystem::DeleteFile(m_pipeline_cache_filename.c_str());
}
const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, 0, nullptr};
VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
return false;
}
m_pipeline_cache_dirty = true;
return true;
}
bool ShaderCache::ReadExistingPipelineCache()
{
std::optional<std::vector<u8>> data = FileSystem::ReadBinaryFile(m_pipeline_cache_filename.c_str());
if (!data.has_value())
return false;
if (data->size() < sizeof(VK_PIPELINE_CACHE_HEADER))
{
Log_ErrorPrintf("Pipeline cache at '%s' is too small", m_pipeline_cache_filename.c_str());
return false;
}
VK_PIPELINE_CACHE_HEADER header;
std::memcpy(&header, data->data(), sizeof(header));
if (!ValidatePipelineCacheHeader(header))
return false;
const VkPipelineCacheCreateInfo ci{VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, nullptr, 0, data->size(),
data->data()};
VkResult res = vkCreatePipelineCache(g_vulkan_context->GetDevice(), &ci, nullptr, &m_pipeline_cache);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreatePipelineCache() failed: ");
return false;
}
return true;
}
bool ShaderCache::FlushPipelineCache()
{
if (m_pipeline_cache == VK_NULL_HANDLE || !m_pipeline_cache_dirty || m_pipeline_cache_filename.empty())
return false;
size_t data_size;
VkResult res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() failed: ");
return false;
}
std::vector<u8> data(data_size);
res = vkGetPipelineCacheData(g_vulkan_context->GetDevice(), m_pipeline_cache, &data_size, data.data());
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPipelineCacheData() (2) failed: ");
return false;
}
data.resize(data_size);
// Save disk writes if it hasn't changed, think of the poor SSDs.
FILESYSTEM_STAT_DATA sd;
if (!FileSystem::StatFile(m_pipeline_cache_filename.c_str(), &sd) || sd.Size != static_cast<s64>(data_size))
{
Log_InfoPrintf("Writing %zu bytes to '%s'", data_size, m_pipeline_cache_filename.c_str());
if (!FileSystem::WriteBinaryFile(m_pipeline_cache_filename.c_str(), data.data(), data.size()))
{
Log_ErrorPrintf("Failed to write pipeline cache to '%s'", m_pipeline_cache_filename.c_str());
return false;
}
}
else
{
Log_InfoPrintf("Skipping updating pipeline cache '%s' due to no changes.", m_pipeline_cache_filename.c_str());
}
m_pipeline_cache_dirty = false;
return true;
}
void ShaderCache::ClosePipelineCache()
{
if (m_pipeline_cache == VK_NULL_HANDLE)
return;
vkDestroyPipelineCache(g_vulkan_context->GetDevice(), m_pipeline_cache, nullptr);
m_pipeline_cache = VK_NULL_HANDLE;
}
std::string ShaderCache::GetShaderCacheBaseFileName(const std::string_view& base_path, bool debug)
{
std::string base_filename(base_path);
base_filename += FS_OSPATH_SEPARATOR_STR "vulkan_shaders";
if (debug)
base_filename += "_debug";
return base_filename;
}
std::string ShaderCache::GetPipelineCacheBaseFileName(const std::string_view& base_path, bool debug)
{
std::string base_filename(base_path);
base_filename += FS_OSPATH_SEPARATOR_STR "vulkan_pipelines";
if (debug)
base_filename += "_debug";
base_filename += ".bin";
return base_filename;
}
ShaderCache::CacheIndexKey ShaderCache::GetCacheKey(ShaderCompiler::Type type, const std::string_view& shader_code)
{
union HashParts
{
struct
{
u64 hash_low;
u64 hash_high;
};
u8 hash[16];
};
HashParts h;
MD5Digest digest;
digest.Update(shader_code.data(), static_cast<u32>(shader_code.length()));
digest.Final(h.hash);
return CacheIndexKey{h.hash_low, h.hash_high, static_cast<u32>(shader_code.length()), type};
}
std::optional<ShaderCompiler::SPIRVCodeVector> ShaderCache::GetShaderSPV(ShaderCompiler::Type type,
std::string_view shader_code)
{
const auto key = GetCacheKey(type, shader_code);
auto iter = m_index.find(key);
if (iter == m_index.end())
return CompileAndAddShaderSPV(key, shader_code);
SPIRVCodeVector spv(iter->second.blob_size);
if (std::fseek(m_blob_file, iter->second.file_offset, SEEK_SET) != 0 ||
std::fread(spv.data(), sizeof(SPIRVCodeType), iter->second.blob_size, m_blob_file) != iter->second.blob_size)
{
Log_ErrorPrintf("Read blob from file failed, recompiling");
return ShaderCompiler::CompileShader(type, shader_code, m_debug);
}
return spv;
}
VkShaderModule ShaderCache::GetShaderModule(ShaderCompiler::Type type, std::string_view shader_code)
{
std::optional<SPIRVCodeVector> spv = GetShaderSPV(type, shader_code);
if (!spv.has_value())
return VK_NULL_HANDLE;
const VkShaderModuleCreateInfo ci{VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO, nullptr, 0,
spv->size() * sizeof(SPIRVCodeType), spv->data()};
VkShaderModule mod;
VkResult res = vkCreateShaderModule(g_vulkan_context->GetDevice(), &ci, nullptr, &mod);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateShaderModule() failed: ");
return VK_NULL_HANDLE;
}
return mod;
}
VkShaderModule ShaderCache::GetVertexShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Vertex, std::move(shader_code));
}
VkShaderModule ShaderCache::GetGeometryShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Geometry, std::move(shader_code));
}
VkShaderModule ShaderCache::GetFragmentShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Fragment, std::move(shader_code));
}
VkShaderModule ShaderCache::GetComputeShader(std::string_view shader_code)
{
return GetShaderModule(ShaderCompiler::Type::Compute, std::move(shader_code));
}
std::optional<ShaderCompiler::SPIRVCodeVector> ShaderCache::CompileAndAddShaderSPV(const CacheIndexKey& key,
std::string_view shader_code)
{
std::optional<SPIRVCodeVector> spv = ShaderCompiler::CompileShader(key.shader_type, shader_code, m_debug);
if (!spv.has_value())
return {};
if (!m_blob_file || std::fseek(m_blob_file, 0, SEEK_END) != 0)
return spv;
CacheIndexData data;
data.file_offset = static_cast<u32>(std::ftell(m_blob_file));
data.blob_size = static_cast<u32>(spv->size());
CacheIndexEntry entry = {};
entry.source_hash_low = key.source_hash_low;
entry.source_hash_high = key.source_hash_high;
entry.source_length = key.source_length;
entry.shader_type = static_cast<u32>(key.shader_type);
entry.blob_size = data.blob_size;
entry.file_offset = data.file_offset;
if (std::fwrite(spv->data(), sizeof(SPIRVCodeType), entry.blob_size, m_blob_file) != entry.blob_size ||
std::fflush(m_blob_file) != 0 || std::fwrite(&entry, sizeof(entry), 1, m_index_file) != 1 ||
std::fflush(m_index_file) != 0)
{
Log_ErrorPrintf("Failed to write shader blob to file");
return spv;
}
m_index.emplace(key, data);
return spv;
}
} // namespace Vulkan

View File

@ -1,106 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../hash_combine.h"
#include "../types.h"
#include "loader.h"
#include "shader_compiler.h"
#include <cstdio>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <unordered_map>
#include <vector>
namespace Vulkan {
class ShaderCache
{
public:
~ShaderCache();
static void Create(std::string_view base_path, u32 version, bool debug);
static void Destroy();
/// Returns a handle to the pipeline cache. Set set_dirty to true if you are planning on writing to it externally.
VkPipelineCache GetPipelineCache(bool set_dirty = true);
/// Writes pipeline cache to file, saving all newly compiled pipelines.
bool FlushPipelineCache();
std::optional<ShaderCompiler::SPIRVCodeVector> GetShaderSPV(ShaderCompiler::Type type, std::string_view shader_code);
VkShaderModule GetShaderModule(ShaderCompiler::Type type, std::string_view shader_code);
VkShaderModule GetVertexShader(std::string_view shader_code);
VkShaderModule GetGeometryShader(std::string_view shader_code);
VkShaderModule GetFragmentShader(std::string_view shader_code);
VkShaderModule GetComputeShader(std::string_view shader_code);
private:
static constexpr u32 FILE_VERSION = 2;
struct CacheIndexKey
{
u64 source_hash_low;
u64 source_hash_high;
u32 source_length;
ShaderCompiler::Type shader_type;
bool operator==(const CacheIndexKey& key) const;
bool operator!=(const CacheIndexKey& key) const;
};
struct CacheIndexEntryHasher
{
std::size_t operator()(const CacheIndexKey& e) const noexcept
{
std::size_t h = 0;
hash_combine(h, e.source_hash_low, e.source_hash_high, e.source_length, e.shader_type);
return h;
}
};
struct CacheIndexData
{
u32 file_offset;
u32 blob_size;
};
using CacheIndex = std::unordered_map<CacheIndexKey, CacheIndexData, CacheIndexEntryHasher>;
ShaderCache();
static std::string GetShaderCacheBaseFileName(const std::string_view& base_path, bool debug);
static std::string GetPipelineCacheBaseFileName(const std::string_view& base_path, bool debug);
static CacheIndexKey GetCacheKey(ShaderCompiler::Type type, const std::string_view& shader_code);
void Open(std::string_view base_path, u32 version, bool debug);
bool CreateNewShaderCache(const std::string& index_filename, const std::string& blob_filename);
bool ReadExistingShaderCache(const std::string& index_filename, const std::string& blob_filename);
void CloseShaderCache();
bool CreateNewPipelineCache();
bool ReadExistingPipelineCache();
void ClosePipelineCache();
std::optional<ShaderCompiler::SPIRVCodeVector> CompileAndAddShaderSPV(const CacheIndexKey& key,
std::string_view shader_code);
std::FILE* m_index_file = nullptr;
std::FILE* m_blob_file = nullptr;
std::string m_pipeline_cache_filename;
CacheIndex m_index;
VkPipelineCache m_pipeline_cache = VK_NULL_HANDLE;
u32 m_version = 0;
bool m_debug = false;
bool m_pipeline_cache_dirty = false;
};
} // namespace Vulkan
extern std::unique_ptr<Vulkan::ShaderCache> g_vulkan_shader_cache;
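For reference, a minimal usage sketch of the interface being removed here; the cache path, version number, and shader source below are placeholders, and error handling is reduced to early returns.
// Illustrative sketch only; path, version, and shader source are placeholders.
Vulkan::ShaderCache::Create("/path/to/cache", 1 /* cache version */, false /* debug */);
const std::string_view vs_source = "..."; // GLSL vertex shader source (placeholder)
VkShaderModule vs = g_vulkan_shader_cache->GetVertexShader(vs_source);
if (vs == VK_NULL_HANDLE)
  return;
// ... build pipelines against g_vulkan_shader_cache->GetPipelineCache() ...
g_vulkan_shader_cache->FlushPipelineCache(); // persist newly-compiled pipelines to disk
Vulkan::ShaderCache::Destroy();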

View File

@ -1,181 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "shader_compiler.h"
#include "../assert.h"
#include "../log.h"
#include "../string_util.h"
#include "util.h"
#include <cstring>
#include <fstream>
#include <memory>
Log_SetChannel(Vulkan::ShaderCompiler);
// glslang includes
#include "SPIRV/GlslangToSpv.h"
#include "StandAlone/ResourceLimits.h"
#include "glslang/Public/ShaderLang.h"
namespace Vulkan::ShaderCompiler {
// Registers itself for cleanup via atexit
bool InitializeGlslang();
static unsigned s_next_bad_shader_id = 1;
static bool glslang_initialized = false;
static std::optional<SPIRVCodeVector> CompileShaderToSPV(EShLanguage stage, const char* stage_filename,
std::string_view source)
{
if (!InitializeGlslang())
return std::nullopt;
std::unique_ptr<glslang::TShader> shader = std::make_unique<glslang::TShader>(stage);
std::unique_ptr<glslang::TProgram> program;
glslang::TShader::ForbidIncluder includer;
EProfile profile = ECoreProfile;
EShMessages messages = static_cast<EShMessages>(EShMsgDefault | EShMsgSpvRules | EShMsgVulkanRules);
int default_version = 450;
std::string full_source_code;
const char* pass_source_code = source.data();
int pass_source_code_length = static_cast<int>(source.size());
shader->setStringsWithLengths(&pass_source_code, &pass_source_code_length, 1);
auto DumpBadShader = [&](const char* msg) {
std::string filename = StringUtil::StdStringFromFormat("bad_shader_%u.txt", s_next_bad_shader_id++);
Log::Writef("Vulkan", "CompileShaderToSPV", LOGLEVEL_ERROR, "%s, writing to %s", msg, filename.c_str());
std::ofstream ofs(filename.c_str(), std::ofstream::out | std::ofstream::binary);
if (ofs.is_open())
{
ofs << source;
ofs << "\n";
ofs << msg << std::endl;
ofs << "Shader Info Log:" << std::endl;
ofs << shader->getInfoLog() << std::endl;
ofs << shader->getInfoDebugLog() << std::endl;
if (program)
{
ofs << "Program Info Log:" << std::endl;
ofs << program->getInfoLog() << std::endl;
ofs << program->getInfoDebugLog() << std::endl;
}
ofs.close();
}
};
if (!shader->parse(&glslang::DefaultTBuiltInResource, default_version, profile, false, true, messages, includer))
{
DumpBadShader("Failed to parse shader");
return std::nullopt;
}
// Even though there's only a single shader, we still need to link it to generate SPV
program = std::make_unique<glslang::TProgram>();
program->addShader(shader.get());
if (!program->link(messages))
{
DumpBadShader("Failed to link program");
return std::nullopt;
}
glslang::TIntermediate* intermediate = program->getIntermediate(stage);
if (!intermediate)
{
DumpBadShader("Failed to generate SPIR-V");
return std::nullopt;
}
SPIRVCodeVector out_code;
spv::SpvBuildLogger logger;
glslang::GlslangToSpv(*intermediate, out_code, &logger);
// Write out messages
// Temporary: skip if it contains "Warning, version 450 is not yet complete; most version-specific
// features are present, but some are missing."
if (std::strlen(shader->getInfoLog()) > 108)
Log_WarningPrintf("Shader info log: %s", shader->getInfoLog());
if (std::strlen(shader->getInfoDebugLog()) > 0)
Log_WarningPrintf("Shader debug info log: %s", shader->getInfoDebugLog());
if (std::strlen(program->getInfoLog()) > 25)
Log_WarningPrintf("Program info log: %s", program->getInfoLog());
if (std::strlen(program->getInfoDebugLog()) > 0)
Log_WarningPrintf("Program debug info log: %s", program->getInfoDebugLog());
std::string spv_messages = logger.getAllMessages();
if (!spv_messages.empty())
Log_WarningPrintf("SPIR-V conversion messages: %s", spv_messages.c_str());
return out_code;
}
bool InitializeGlslang()
{
if (glslang_initialized)
return true;
if (!glslang::InitializeProcess())
{
Panic("Failed to initialize glslang shader compiler");
return false;
}
std::atexit([]() { glslang::FinalizeProcess(); });
glslang_initialized = true;
return true;
}
void DeinitializeGlslang()
{
if (!glslang_initialized)
return;
glslang::FinalizeProcess();
glslang_initialized = false;
}
std::optional<SPIRVCodeVector> CompileVertexShader(std::string_view source_code)
{
return CompileShaderToSPV(EShLangVertex, "vs", source_code);
}
std::optional<SPIRVCodeVector> CompileGeometryShader(std::string_view source_code)
{
return CompileShaderToSPV(EShLangGeometry, "gs", source_code);
}
std::optional<SPIRVCodeVector> CompileFragmentShader(std::string_view source_code)
{
return CompileShaderToSPV(EShLangFragment, "ps", source_code);
}
std::optional<SPIRVCodeVector> CompileComputeShader(std::string_view source_code)
{
return CompileShaderToSPV(EShLangCompute, "cs", source_code);
}
std::optional<ShaderCompiler::SPIRVCodeVector> CompileShader(Type type, std::string_view source_code, bool debug)
{
switch (type)
{
case Type::Vertex:
return CompileShaderToSPV(EShLangVertex, "vs", source_code);
case Type::Geometry:
return CompileShaderToSPV(EShLangGeometry, "gs", source_code);
case Type::Fragment:
return CompileShaderToSPV(EShLangFragment, "ps", source_code);
case Type::Compute:
return CompileShaderToSPV(EShLangCompute, "cs", source_code);
default:
return std::nullopt;
}
}
} // namespace Vulkan::ShaderCompiler

View File

@ -1,42 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../types.h"
#include <optional>
#include <string_view>
#include <vector>
namespace Vulkan::ShaderCompiler {
// Shader types
enum class Type
{
Vertex,
Geometry,
Fragment,
Compute
};
void DeinitializeGlslang();
// SPIR-V compiled code type
using SPIRVCodeType = u32;
using SPIRVCodeVector = std::vector<SPIRVCodeType>;
// Compile a vertex shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileVertexShader(std::string_view source_code);
// Compile a geometry shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileGeometryShader(std::string_view source_code);
// Compile a fragment shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileFragmentShader(std::string_view source_code);
// Compile a compute shader to SPIR-V.
std::optional<SPIRVCodeVector> CompileComputeShader(std::string_view source_code);
std::optional<SPIRVCodeVector> CompileShader(Type type, std::string_view source_code, bool debug);
} // namespace Vulkan::ShaderCompiler
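A minimal sketch of driving this interface directly, assuming glslang is initialized lazily as in the implementation above; the GLSL source is a placeholder.
// Illustrative sketch only.
const std::string_view src = "#version 450\nlayout(location = 0) out vec4 o_col;\nvoid main() { o_col = vec4(1.0); }";
std::optional<Vulkan::ShaderCompiler::SPIRVCodeVector> spv =
  Vulkan::ShaderCompiler::CompileShader(Vulkan::ShaderCompiler::Type::Fragment, src, false /* debug */);
if (!spv.has_value())
  return; // on failure the compiler dumps the source to a bad_shader_N.txt file
// spv->data() / spv->size() supply the SPIR-V words for VkShaderModuleCreateInfo.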

View File

@ -1,890 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "swap_chain.h"
#include "../assert.h"
#include "../log.h"
#include "context.h"
#include "util.h"
#include <algorithm>
#include <array>
#include <cmath>
Log_SetChannel(Vulkan::SwapChain);
#if defined(VK_USE_PLATFORM_XLIB_KHR)
#include <X11/Xlib.h>
#endif
#if defined(__APPLE__)
#include <dispatch/dispatch.h>
#include <objc/message.h>
static bool IsMainThread()
{
Class clsNSThread = objc_getClass("NSThread");
if (!clsNSThread)
return false;
return reinterpret_cast<BOOL (*)(Class, SEL)>(objc_msgSend)(clsNSThread, sel_getUid("isMainThread"));
}
static bool CreateMetalLayer(WindowInfo* wi)
{
if (!IsMainThread())
{
struct MainThreadParams
{
WindowInfo* wi;
bool result;
};
MainThreadParams params = {wi, false};
dispatch_sync_f(dispatch_get_main_queue(), &params, [](void* vparams) {
MainThreadParams* params = static_cast<MainThreadParams*>(vparams);
params->result = CreateMetalLayer(params->wi);
});
return params.result;
}
id view = reinterpret_cast<id>(wi->window_handle);
Class clsCAMetalLayer = objc_getClass("CAMetalLayer");
if (!clsCAMetalLayer)
{
Log_ErrorPrint("Failed to get CAMetalLayer class.");
return false;
}
// [CAMetalLayer layer]
id layer = reinterpret_cast<id (*)(Class, SEL)>(objc_msgSend)(clsCAMetalLayer, sel_getUid("layer"));
if (!layer)
{
Log_ErrorPrint("Failed to create Metal layer.");
return false;
}
// [view setWantsLayer:YES]
reinterpret_cast<void (*)(id, SEL, BOOL)>(objc_msgSend)(view, sel_getUid("setWantsLayer:"), YES);
// [view setLayer:layer]
reinterpret_cast<void (*)(id, SEL, id)>(objc_msgSend)(view, sel_getUid("setLayer:"), layer);
// NSScreen* screen = [NSScreen mainScreen]
id screen = reinterpret_cast<id (*)(Class, SEL)>(objc_msgSend)(objc_getClass("NSScreen"), sel_getUid("mainScreen"));
// CGFloat factor = [screen backingScaleFactor]
double factor = reinterpret_cast<double (*)(id, SEL)>(objc_msgSend)(screen, sel_getUid("backingScaleFactor"));
// layer.contentsScale = factor
reinterpret_cast<void (*)(id, SEL, double)>(objc_msgSend)(layer, sel_getUid("setContentsScale:"), factor);
// Store the layer pointer, that way MoltenVK doesn't call [NSView layer] outside the main thread.
wi->surface_handle = layer;
return true;
}
static void DestroyMetalLayer(WindowInfo* wi)
{
if (!IsMainThread())
{
dispatch_sync_f(dispatch_get_main_queue(), wi, [](void* wi) { DestroyMetalLayer(static_cast<WindowInfo*>(wi)); });
return;
}
id view = reinterpret_cast<id>(wi->window_handle);
id layer = reinterpret_cast<id>(wi->surface_handle);
if (layer == nil)
return;
reinterpret_cast<void (*)(id, SEL, id)>(objc_msgSend)(view, sel_getUid("setLayer:"), nil);
reinterpret_cast<void (*)(id, SEL, BOOL)>(objc_msgSend)(view, sel_getUid("setWantsLayer:"), NO);
wi->surface_handle = nullptr;
}
#endif
namespace Vulkan {
SwapChain::SwapChain(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync)
: m_window_info(wi), m_surface(surface), m_vsync_enabled(vsync)
{
}
SwapChain::~SwapChain()
{
DestroySemaphores();
DestroySwapChainImages();
DestroySwapChain();
DestroySurface();
}
static VkSurfaceKHR CreateDisplaySurface(VkInstance instance, VkPhysicalDevice physical_device, WindowInfo* wi)
{
Log_InfoPrintf("Trying to create a VK_KHR_display surface of %ux%u", wi->surface_width, wi->surface_height);
u32 num_displays;
VkResult res = vkGetPhysicalDeviceDisplayPropertiesKHR(physical_device, &num_displays, nullptr);
if (res != VK_SUCCESS || num_displays == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPropertiesKHR() failed:");
return {};
}
std::vector<VkDisplayPropertiesKHR> displays(num_displays);
res = vkGetPhysicalDeviceDisplayPropertiesKHR(physical_device, &num_displays, displays.data());
if (res != VK_SUCCESS || num_displays != displays.size())
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPropertiesKHR() failed:");
return {};
}
for (u32 display_index = 0; display_index < num_displays; display_index++)
{
const VkDisplayPropertiesKHR& props = displays[display_index];
Log_DevPrintf("Testing display '%s'", props.displayName);
u32 num_modes;
res = vkGetDisplayModePropertiesKHR(physical_device, props.display, &num_modes, nullptr);
if (res != VK_SUCCESS || num_modes == 0)
{
LOG_VULKAN_ERROR(res, "vkGetDisplayModePropertiesKHR() failed:");
continue;
}
std::vector<VkDisplayModePropertiesKHR> modes(num_modes);
res = vkGetDisplayModePropertiesKHR(physical_device, props.display, &num_modes, modes.data());
if (res != VK_SUCCESS || num_modes != modes.size())
{
LOG_VULKAN_ERROR(res, "vkGetDisplayModePropertiesKHR() failed:");
continue;
}
const VkDisplayModePropertiesKHR* matched_mode = nullptr;
for (const VkDisplayModePropertiesKHR& mode : modes)
{
const float refresh_rate = static_cast<float>(mode.parameters.refreshRate) / 1000.0f;
Log_DevPrintf(" Mode %ux%u @ %f", mode.parameters.visibleRegion.width, mode.parameters.visibleRegion.height,
refresh_rate);
if (!matched_mode &&
((wi->surface_width == 0 && wi->surface_height == 0) ||
(mode.parameters.visibleRegion.width == wi->surface_width &&
mode.parameters.visibleRegion.height == wi->surface_height &&
(wi->surface_refresh_rate == 0.0f || std::abs(refresh_rate - wi->surface_refresh_rate) < 0.1f))))
{
matched_mode = &mode;
}
}
if (!matched_mode)
{
Log_DevPrintf("No modes matched on '%s'", props.displayName);
continue;
}
u32 num_planes;
res = vkGetPhysicalDeviceDisplayPlanePropertiesKHR(physical_device, &num_planes, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR() failed:");
continue;
}
if (num_planes == 0)
continue;
std::vector<VkDisplayPlanePropertiesKHR> planes(num_planes);
res = vkGetPhysicalDeviceDisplayPlanePropertiesKHR(physical_device, &num_planes, planes.data());
if (res != VK_SUCCESS || num_planes != planes.size())
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR() failed:");
continue;
}
u32 plane_index = 0;
for (; plane_index < num_planes; plane_index++)
{
u32 supported_display_count;
res = vkGetDisplayPlaneSupportedDisplaysKHR(physical_device, plane_index, &supported_display_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetDisplayPlaneSupportedDisplaysKHR() failed:");
continue;
}
if (supported_display_count == 0)
continue;
std::vector<VkDisplayKHR> supported_displays(supported_display_count);
res = vkGetDisplayPlaneSupportedDisplaysKHR(physical_device, plane_index, &supported_display_count,
supported_displays.data());
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetDisplayPlaneSupportedDisplaysKHR() failed:");
continue;
}
const bool is_supported =
std::find(supported_displays.begin(), supported_displays.end(), props.display) != supported_displays.end();
if (!is_supported)
continue;
break;
}
if (plane_index == num_planes)
{
Log_DevPrintf("No planes matched on '%s'", props.displayName);
continue;
}
VkDisplaySurfaceCreateInfoKHR info = {};
info.sType = VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR;
info.displayMode = matched_mode->displayMode;
info.planeIndex = plane_index;
info.planeStackIndex = planes[plane_index].currentStackIndex;
info.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
info.globalAlpha = 1.0f;
info.alphaMode = VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR;
info.imageExtent = matched_mode->parameters.visibleRegion;
VkSurfaceKHR surface;
res = vkCreateDisplayPlaneSurfaceKHR(instance, &info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateDisplayPlaneSurfaceKHR() failed: ");
continue;
}
wi->surface_refresh_rate = static_cast<float>(matched_mode->parameters.refreshRate) / 1000.0f;
return surface;
}
return VK_NULL_HANDLE;
}
static std::vector<SwapChain::FullscreenModeInfo> GetDisplayModes(VkInstance instance, VkPhysicalDevice physical_device,
const WindowInfo& wi)
{
u32 num_displays;
VkResult res = vkGetPhysicalDeviceDisplayPropertiesKHR(physical_device, &num_displays, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPropertiesKHR() failed:");
return {};
}
if (num_displays == 0)
{
Log_ErrorPrint("No displays were returned");
return {};
}
std::vector<VkDisplayPropertiesKHR> displays(num_displays);
res = vkGetPhysicalDeviceDisplayPropertiesKHR(physical_device, &num_displays, displays.data());
if (res != VK_SUCCESS || num_displays != displays.size())
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceDisplayPropertiesKHR() failed:");
return {};
}
std::vector<SwapChain::FullscreenModeInfo> result;
for (u32 display_index = 0; display_index < num_displays; display_index++)
{
const VkDisplayPropertiesKHR& props = displays[display_index];
u32 num_modes;
res = vkGetDisplayModePropertiesKHR(physical_device, props.display, &num_modes, nullptr);
if (res != VK_SUCCESS || num_modes == 0)
{
LOG_VULKAN_ERROR(res, "vkGetDisplayModePropertiesKHR() failed:");
continue;
}
std::vector<VkDisplayModePropertiesKHR> modes(num_modes);
res = vkGetDisplayModePropertiesKHR(physical_device, props.display, &num_modes, modes.data());
if (res != VK_SUCCESS || num_modes != modes.size())
{
LOG_VULKAN_ERROR(res, "vkGetDisplayModePropertiesKHR() failed:");
continue;
}
for (const VkDisplayModePropertiesKHR& mode : modes)
{
const float refresh_rate = static_cast<float>(mode.parameters.refreshRate) / 1000.0f;
if (std::find_if(result.begin(), result.end(), [&mode, refresh_rate](const SwapChain::FullscreenModeInfo& mi) {
return (mi.width == mode.parameters.visibleRegion.width &&
mi.height == mode.parameters.visibleRegion.height && mi.refresh_rate == refresh_rate);
}) != result.end())
{
continue;
}
result.push_back(SwapChain::FullscreenModeInfo{static_cast<u32>(mode.parameters.visibleRegion.width),
static_cast<u32>(mode.parameters.visibleRegion.height),
refresh_rate});
}
}
return result;
}
VkSurfaceKHR SwapChain::CreateVulkanSurface(VkInstance instance, VkPhysicalDevice physical_device, WindowInfo* wi)
{
#if defined(VK_USE_PLATFORM_WIN32_KHR)
if (wi->type == WindowInfo::Type::Win32)
{
VkWin32SurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkWin32SurfaceCreateFlagsKHR flags
nullptr, // HINSTANCE hinstance
reinterpret_cast<HWND>(wi->window_handle) // HWND hwnd
};
VkSurfaceKHR surface;
VkResult res = vkCreateWin32SurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateWin32SurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_XLIB_KHR)
if (wi->type == WindowInfo::Type::X11)
{
VkXlibSurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkXlibSurfaceCreateFlagsKHR flags
static_cast<Display*>(wi->display_connection), // Display* dpy
reinterpret_cast<Window>(wi->window_handle) // Window window
};
VkSurfaceKHR surface;
VkResult res = vkCreateXlibSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateXlibSurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_WAYLAND_KHR)
if (wi->type == WindowInfo::Type::Wayland)
{
VkWaylandSurfaceCreateInfoKHR surface_create_info = {VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR, nullptr, 0,
static_cast<struct wl_display*>(wi->display_connection),
static_cast<struct wl_surface*>(wi->window_handle)};
VkSurfaceKHR surface;
VkResult res = vkCreateWaylandSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateWaylandSurfaceEXT failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_ANDROID_KHR)
if (wi->type == WindowInfo::Type::Android)
{
VkAndroidSurfaceCreateInfoKHR surface_create_info = {
VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAndroidSurfaceCreateFlagsKHR flags
reinterpret_cast<ANativeWindow*>(wi->window_handle) // ANativeWindow* window
};
VkSurfaceKHR surface;
VkResult res = vkCreateAndroidSurfaceKHR(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateAndroidSurfaceKHR failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
#if defined(VK_USE_PLATFORM_METAL_EXT)
if (wi->type == WindowInfo::Type::MacOS)
{
if (!wi->surface_handle && !CreateMetalLayer(wi))
return VK_NULL_HANDLE;
VkMetalSurfaceCreateInfoEXT surface_create_info = {VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT, nullptr, 0,
static_cast<const CAMetalLayer*>(wi->surface_handle)};
VkSurfaceKHR surface;
VkResult res = vkCreateMetalSurfaceEXT(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateMetalSurfaceEXT failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#elif defined(VK_USE_PLATFORM_MACOS_MVK)
if (wi->type == WindowInfo::Type::MacOS)
{
VkMacOSSurfaceCreateInfoMVK surface_create_info = {VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK, nullptr, 0,
wi->window_handle};
VkSurfaceKHR surface;
VkResult res = vkCreateMacOSSurfaceMVK(instance, &surface_create_info, nullptr, &surface);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateMacOSSurfaceMVK failed: ");
return VK_NULL_HANDLE;
}
return surface;
}
#endif
if (wi->type == WindowInfo::Type::Display)
return CreateDisplaySurface(instance, physical_device, wi);
return VK_NULL_HANDLE;
}
void SwapChain::DestroyVulkanSurface(VkInstance instance, WindowInfo* wi, VkSurfaceKHR surface)
{
vkDestroySurfaceKHR(instance, surface, nullptr);
#if defined(__APPLE__)
if (wi->type == WindowInfo::Type::MacOS && wi->surface_handle)
DestroyMetalLayer(wi);
#endif
}
std::vector<SwapChain::FullscreenModeInfo>
SwapChain::GetSurfaceFullscreenModes(VkInstance instance, VkPhysicalDevice physical_device, const WindowInfo& wi)
{
if (wi.type == WindowInfo::Type::Display)
return GetDisplayModes(instance, physical_device, wi);
return {};
}
std::unique_ptr<SwapChain> SwapChain::Create(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync)
{
std::unique_ptr<SwapChain> swap_chain = std::make_unique<SwapChain>(wi, surface, vsync);
if (!swap_chain->CreateSwapChain() || !swap_chain->SetupSwapChainImages() || !swap_chain->CreateSemaphores())
return nullptr;
return swap_chain;
}
bool SwapChain::SelectSurfaceFormat()
{
u32 format_count;
VkResult res =
vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, nullptr);
if (res != VK_SUCCESS || format_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
return false;
}
std::vector<VkSurfaceFormatKHR> surface_formats(format_count);
res = vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count,
surface_formats.data());
Assert(res == VK_SUCCESS);
// If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA
if (surface_formats[0].format == VK_FORMAT_UNDEFINED)
{
m_surface_format.format = VK_FORMAT_R8G8B8A8_UNORM;
m_surface_format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
return true;
}
// Try to find a suitable format.
for (const VkSurfaceFormatKHR& surface_format : surface_formats)
{
// Some drivers seem to return a SRGB format here (Intel Mesa).
// This results in gamma correction when presenting to the screen, which we don't want.
// Use a linear format instead, if this is the case.
m_surface_format.format = Util::GetLinearFormat(surface_format.format);
m_surface_format.colorSpace = surface_format.colorSpace;
return true;
}
Panic("Failed to find a suitable format for swap chain buffers.");
return false;
}
bool SwapChain::SelectPresentMode()
{
VkResult res;
u32 mode_count;
res =
vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count, nullptr);
if (res != VK_SUCCESS || mode_count == 0)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: ");
return false;
}
std::vector<VkPresentModeKHR> present_modes(mode_count);
res = vkGetPhysicalDeviceSurfacePresentModesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &mode_count,
present_modes.data());
Assert(res == VK_SUCCESS);
// Checks if a particular mode is supported, if it is, returns that mode.
auto CheckForMode = [&present_modes](VkPresentModeKHR check_mode) {
auto it = std::find_if(present_modes.begin(), present_modes.end(),
[check_mode](VkPresentModeKHR mode) { return check_mode == mode; });
return it != present_modes.end();
};
// If vsync is enabled, use VK_PRESENT_MODE_FIFO_KHR.
// This check should not fail with conforming drivers, as the FIFO present mode is mandated by
// the specification (VK_KHR_swapchain). In case it isn't though, fall through to any other mode.
if (m_vsync_enabled && CheckForMode(VK_PRESENT_MODE_FIFO_KHR))
{
m_present_mode = VK_PRESENT_MODE_FIFO_KHR;
return true;
}
// Prefer screen-tearing, if possible, for lowest latency.
if (CheckForMode(VK_PRESENT_MODE_IMMEDIATE_KHR))
{
m_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
return true;
}
// Use optimized-vsync above vsync.
if (CheckForMode(VK_PRESENT_MODE_MAILBOX_KHR))
{
m_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
return true;
}
// Fall back to whatever is available.
m_present_mode = present_modes[0];
return true;
}
bool SwapChain::CreateSwapChain()
{
// Look up surface properties to determine image count and dimensions
VkSurfaceCapabilitiesKHR surface_capabilities;
VkResult res =
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &surface_capabilities);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR failed: ");
return false;
}
// Select swap chain format and present mode
if (!SelectSurfaceFormat() || !SelectPresentMode())
return false;
// Select the number of images in the swap chain; prefer one extra buffer in the background to work on.
u32 image_count = std::max(surface_capabilities.minImageCount, 2u);
// maxImageCount can be zero, in which case there isn't an upper limit on the number of buffers.
if (surface_capabilities.maxImageCount > 0)
image_count = std::min(image_count, surface_capabilities.maxImageCount);
// Determine the dimensions of the swap chain. A currentExtent of 0xFFFFFFFF indicates the surface
// size is determined by the extent we specify, so fall back to the window dimensions.
VkExtent2D size = surface_capabilities.currentExtent;
#ifndef ANDROID
if (size.width == UINT32_MAX)
#endif
{
size.width = m_window_info.surface_width;
size.height = m_window_info.surface_height;
}
size.width =
std::clamp(size.width, surface_capabilities.minImageExtent.width, surface_capabilities.maxImageExtent.width);
size.height =
std::clamp(size.height, surface_capabilities.minImageExtent.height, surface_capabilities.maxImageExtent.height);
// Prefer identity transform if possible
VkSurfaceTransformFlagBitsKHR transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
if (!(surface_capabilities.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR))
transform = surface_capabilities.currentTransform;
// Select swap chain flags, we only need a colour attachment
VkImageUsageFlags image_usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
if (!(surface_capabilities.supportedUsageFlags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT))
{
Log_ErrorPrintf("Vulkan: Swap chain does not support usage as color attachment");
return false;
}
// Store the old/current swap chain when recreating for resize
VkSwapchainKHR old_swap_chain = m_swap_chain;
m_swap_chain = VK_NULL_HANDLE;
// Now we can actually create the swap chain
VkSwapchainCreateInfoKHR swap_chain_info = {VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
nullptr,
0,
m_surface,
image_count,
m_surface_format.format,
m_surface_format.colorSpace,
size,
1u,
image_usage,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
transform,
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
m_present_mode,
VK_TRUE,
old_swap_chain};
std::array<uint32_t, 2> indices = {{
g_vulkan_context->GetGraphicsQueueFamilyIndex(),
g_vulkan_context->GetPresentQueueFamilyIndex(),
}};
if (g_vulkan_context->GetGraphicsQueueFamilyIndex() != g_vulkan_context->GetPresentQueueFamilyIndex())
{
swap_chain_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
swap_chain_info.queueFamilyIndexCount = 2;
swap_chain_info.pQueueFamilyIndices = indices.data();
}
res = vkCreateSwapchainKHR(g_vulkan_context->GetDevice(), &swap_chain_info, nullptr, &m_swap_chain);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSwapchainKHR failed: ");
return false;
}
// Now destroy the old swap chain, since it's been recreated.
// We can do this immediately since all work should have been completed before calling resize.
if (old_swap_chain != VK_NULL_HANDLE)
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), old_swap_chain, nullptr);
m_window_info.surface_width = std::max(1u, size.width);
m_window_info.surface_height = std::max(1u, size.height);
return true;
}
bool SwapChain::SetupSwapChainImages()
{
Assert(m_images.empty());
u32 image_count;
VkResult res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, nullptr);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetSwapchainImagesKHR failed: ");
return false;
}
std::vector<VkImage> images(image_count);
res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, images.data());
Assert(res == VK_SUCCESS);
m_load_render_pass = g_vulkan_context->GetRenderPass(m_surface_format.format, VK_FORMAT_UNDEFINED,
VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD);
m_clear_render_pass = g_vulkan_context->GetRenderPass(m_surface_format.format, VK_FORMAT_UNDEFINED,
VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_CLEAR);
if (m_load_render_pass == VK_NULL_HANDLE || m_clear_render_pass == VK_NULL_HANDLE)
{
Panic("Failed to get swap chain render passes.");
return false;
}
m_images.reserve(image_count);
for (u32 i = 0; i < image_count; i++)
{
SwapChainImage image;
image.image = images[i];
// Create texture object, which creates a view of the backbuffer
if (!image.texture.Adopt(image.image, VK_IMAGE_VIEW_TYPE_2D, m_window_info.surface_width,
m_window_info.surface_height, 1, 1, m_surface_format.format, VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_LAYOUT_UNDEFINED))
{
return false;
}
image.framebuffer = image.texture.CreateFramebuffer(m_load_render_pass);
if (image.framebuffer == VK_NULL_HANDLE)
return false;
m_images.emplace_back(std::move(image));
}
return true;
}
void SwapChain::DestroySwapChainImages()
{
for (auto& it : m_images)
{
// Images themselves are cleaned up by the swap chain object
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), it.framebuffer, nullptr);
}
m_images.clear();
}
void SwapChain::DestroySwapChain()
{
if (m_swap_chain == VK_NULL_HANDLE)
return;
vkDestroySwapchainKHR(g_vulkan_context->GetDevice(), m_swap_chain, nullptr);
m_swap_chain = VK_NULL_HANDLE;
}
VkResult SwapChain::AcquireNextImage()
{
if (!m_swap_chain)
return VK_ERROR_SURFACE_LOST_KHR;
return vkAcquireNextImageKHR(g_vulkan_context->GetDevice(), m_swap_chain, UINT64_MAX, m_image_available_semaphore,
VK_NULL_HANDLE, &m_current_image);
}
bool SwapChain::ResizeSwapChain(u32 new_width /* = 0 */, u32 new_height /* = 0 */)
{
DestroySwapChainImages();
if (new_width != 0 && new_height != 0)
{
m_window_info.surface_width = new_width;
m_window_info.surface_height = new_height;
}
if (!CreateSwapChain() || !SetupSwapChainImages())
{
DestroySwapChainImages();
DestroySwapChain();
return false;
}
return true;
}
bool SwapChain::RecreateSwapChain()
{
DestroySwapChainImages();
if (!CreateSwapChain() || !SetupSwapChainImages())
{
DestroySwapChainImages();
DestroySwapChain();
return false;
}
return true;
}
bool SwapChain::SetVSync(bool enabled)
{
if (m_vsync_enabled == enabled)
return true;
// Recreate the swap chain with the new present mode.
m_vsync_enabled = enabled;
return RecreateSwapChain();
}
bool SwapChain::RecreateSurface(const WindowInfo& new_wi)
{
// Destroy the old swap chain, images, and surface.
DestroySwapChainImages();
DestroySwapChain();
DestroySurface();
// Re-create the surface with the new native handle
m_window_info = new_wi;
m_surface =
CreateVulkanSurface(g_vulkan_context->GetVulkanInstance(), g_vulkan_context->GetPhysicalDevice(), &m_window_info);
if (m_surface == VK_NULL_HANDLE)
return false;
// The validation layers get angry at us if we don't call this before creating the swapchain.
VkBool32 present_supported = VK_TRUE;
VkResult res =
vkGetPhysicalDeviceSurfaceSupportKHR(g_vulkan_context->GetPhysicalDevice(),
g_vulkan_context->GetPresentQueueFamilyIndex(), m_surface, &present_supported);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceSupportKHR failed: ");
return false;
}
if (!present_supported)
{
Panic("Recreated surface does not support presenting.");
return false;
}
// Finally re-create the swap chain
if (!CreateSwapChain() || !SetupSwapChainImages())
return false;
return true;
}
void SwapChain::DestroySurface()
{
if (m_surface == VK_NULL_HANDLE)
return;
DestroyVulkanSurface(g_vulkan_context->GetVulkanInstance(), &m_window_info, m_surface);
m_surface = VK_NULL_HANDLE;
}
bool SwapChain::CreateSemaphores()
{
// Create two semaphores, one that is triggered when the swapchain buffer is ready, another after
// submit and before present
VkSemaphoreCreateInfo semaphore_info = {
VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, // VkStructureType sType
nullptr, // const void* pNext
0 // VkSemaphoreCreateFlags flags
};
VkResult res;
if ((res = vkCreateSemaphore(g_vulkan_context->GetDevice(), &semaphore_info, nullptr,
&m_image_available_semaphore)) != VK_SUCCESS ||
(res = vkCreateSemaphore(g_vulkan_context->GetDevice(), &semaphore_info, nullptr,
&m_rendering_finished_semaphore)) != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateSemaphore failed: ");
return false;
}
return true;
}
void SwapChain::DestroySemaphores()
{
if (m_image_available_semaphore != VK_NULL_HANDLE)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), m_image_available_semaphore, nullptr);
m_image_available_semaphore = VK_NULL_HANDLE;
}
if (m_rendering_finished_semaphore != VK_NULL_HANDLE)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), m_rendering_finished_semaphore, nullptr);
m_rendering_finished_semaphore = VK_NULL_HANDLE;
}
}
} // namespace Vulkan

View File

@ -1,107 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../types.h"
#include "../window_info.h"
#include "texture.h"
#include "loader.h"
#include <memory>
#include <vector>
namespace Vulkan {
class SwapChain
{
public:
SwapChain(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync);
~SwapChain();
// Creates a vulkan-renderable surface for the specified window handle.
static VkSurfaceKHR CreateVulkanSurface(VkInstance instance, VkPhysicalDevice physical_device, WindowInfo* wi);
// Destroys a previously-created surface.
static void DestroyVulkanSurface(VkInstance instance, WindowInfo* wi, VkSurfaceKHR surface);
// Enumerates fullscreen modes for window info.
struct FullscreenModeInfo
{
u32 width;
u32 height;
float refresh_rate;
};
static std::vector<FullscreenModeInfo>
GetSurfaceFullscreenModes(VkInstance instance, VkPhysicalDevice physical_device, const WindowInfo& wi);
// Create a new swap chain from a pre-existing surface.
static std::unique_ptr<SwapChain> Create(const WindowInfo& wi, VkSurfaceKHR surface, bool vsync);
ALWAYS_INLINE VkSurfaceKHR GetSurface() const { return m_surface; }
ALWAYS_INLINE VkSurfaceFormatKHR GetSurfaceFormat() const { return m_surface_format; }
ALWAYS_INLINE VkFormat GetTextureFormat() const { return m_surface_format.format; }
ALWAYS_INLINE bool IsVSyncEnabled() const { return m_vsync_enabled; }
ALWAYS_INLINE VkSwapchainKHR GetSwapChain() const { return m_swap_chain; }
ALWAYS_INLINE const WindowInfo& GetWindowInfo() const { return m_window_info; }
ALWAYS_INLINE u32 GetWidth() const { return m_window_info.surface_width; }
ALWAYS_INLINE u32 GetHeight() const { return m_window_info.surface_height; }
ALWAYS_INLINE u32 GetCurrentImageIndex() const { return m_current_image; }
ALWAYS_INLINE u32 GetImageCount() const { return static_cast<u32>(m_images.size()); }
ALWAYS_INLINE VkImage GetCurrentImage() const { return m_images[m_current_image].image; }
ALWAYS_INLINE const Texture& GetCurrentTexture() const { return m_images[m_current_image].texture; }
ALWAYS_INLINE Texture& GetCurrentTexture() { return m_images[m_current_image].texture; }
ALWAYS_INLINE VkFramebuffer GetCurrentFramebuffer() const { return m_images[m_current_image].framebuffer; }
ALWAYS_INLINE VkRenderPass GetLoadRenderPass() const { return m_load_render_pass; }
ALWAYS_INLINE VkRenderPass GetClearRenderPass() const { return m_clear_render_pass; }
ALWAYS_INLINE VkSemaphore GetImageAvailableSemaphore() const { return m_image_available_semaphore; }
ALWAYS_INLINE VkSemaphore GetRenderingFinishedSemaphore() const { return m_rendering_finished_semaphore; }
VkResult AcquireNextImage();
bool RecreateSurface(const WindowInfo& new_wi);
bool ResizeSwapChain(u32 new_width = 0, u32 new_height = 0);
bool RecreateSwapChain();
// Change vsync enabled state. This may fail as it causes a swapchain recreation.
bool SetVSync(bool enabled);
private:
bool SelectSurfaceFormat();
bool SelectPresentMode();
bool CreateSwapChain();
void DestroySwapChain();
bool SetupSwapChainImages();
void DestroySwapChainImages();
void DestroySurface();
bool CreateSemaphores();
void DestroySemaphores();
struct SwapChainImage
{
VkImage image;
Texture texture;
VkFramebuffer framebuffer;
};
WindowInfo m_window_info;
VkSurfaceKHR m_surface = VK_NULL_HANDLE;
VkSurfaceFormatKHR m_surface_format = {};
VkPresentModeKHR m_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
VkRenderPass m_load_render_pass = VK_NULL_HANDLE;
VkRenderPass m_clear_render_pass = VK_NULL_HANDLE;
VkSemaphore m_image_available_semaphore = VK_NULL_HANDLE;
VkSemaphore m_rendering_finished_semaphore = VK_NULL_HANDLE;
VkSwapchainKHR m_swap_chain = VK_NULL_HANDLE;
std::vector<SwapChainImage> m_images;
u32 m_current_image = 0;
bool m_vsync_enabled = false;
};
} // namespace Vulkan
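A minimal creation/usage sketch against this header, assuming wi is an already-populated WindowInfo for the native window; error handling is abbreviated.
// Illustrative sketch only.
VkSurfaceKHR surface = Vulkan::SwapChain::CreateVulkanSurface(
  g_vulkan_context->GetVulkanInstance(), g_vulkan_context->GetPhysicalDevice(), &wi);
if (surface == VK_NULL_HANDLE)
  return;
std::unique_ptr<Vulkan::SwapChain> swap_chain = Vulkan::SwapChain::Create(wi, surface, true /* vsync */);
if (!swap_chain || swap_chain->AcquireNextImage() != VK_SUCCESS)
  return;
// Render into swap_chain->GetCurrentFramebuffer() with GetClearRenderPass(), then queue the present
// against GetRenderingFinishedSemaphore().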

View File

@ -1,534 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "texture.h"
#include "../align.h"
#include "../assert.h"
#include "../log.h"
#include "../string_util.h"
#include "context.h"
#include "util.h"
#include <algorithm>
Log_SetChannel(Texture);
static constexpr std::array<VkFormat, static_cast<u32>(GPUTexture::Format::Count)> s_vk_mapping = {
{VK_FORMAT_UNDEFINED, VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_R5G6B5_UNORM_PACK16,
VK_FORMAT_A1R5G5B5_UNORM_PACK16, VK_FORMAT_R8_UNORM, VK_FORMAT_D16_UNORM}};
static constexpr VkComponentMapping s_identity_swizzle{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY};
Vulkan::Texture::Texture() = default;
Vulkan::Texture::Texture(Texture&& move)
: m_view_type(move.m_view_type), m_layout(move.m_layout), m_image(move.m_image), m_allocation(move.m_allocation),
m_view(move.m_view)
{
m_width = move.m_width;
m_height = move.m_height;
m_layers = move.m_layers;
m_levels = move.m_levels;
m_samples = move.m_samples;
move.ClearBaseProperties();
move.m_view_type = VK_IMAGE_VIEW_TYPE_2D;
move.m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
move.m_image = VK_NULL_HANDLE;
move.m_allocation = VK_NULL_HANDLE;
move.m_view = VK_NULL_HANDLE;
}
Vulkan::Texture::~Texture()
{
if (IsValid())
Destroy(true);
}
VkFormat Vulkan::Texture::GetVkFormat(Format format)
{
return s_vk_mapping[static_cast<u8>(format)];
}
GPUTexture::Format Vulkan::Texture::LookupBaseFormat(VkFormat vformat)
{
for (u32 i = 0; i < static_cast<u32>(s_vk_mapping.size()); i++)
{
if (s_vk_mapping[i] == vformat)
return static_cast<Format>(i);
}
return GPUTexture::Format::Unknown;
}
bool Vulkan::Texture::IsValid() const
{
return (m_image != VK_NULL_HANDLE);
}
Vulkan::Texture& Vulkan::Texture::operator=(Texture&& move)
{
if (IsValid())
Destroy(true);
std::swap(m_width, move.m_width);
std::swap(m_height, move.m_height);
std::swap(m_levels, move.m_levels);
std::swap(m_layers, move.m_layers);
std::swap(m_format, move.m_format);
std::swap(m_samples, move.m_samples);
std::swap(m_view_type, move.m_view_type);
std::swap(m_layout, move.m_layout);
std::swap(m_image, move.m_image);
std::swap(m_allocation, move.m_allocation);
std::swap(m_view, move.m_view);
return *this;
}
bool Vulkan::Texture::Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format,
VkSampleCountFlagBits samples, VkImageViewType view_type, VkImageTiling tiling,
VkImageUsageFlags usage, bool dedicated_memory /* = false */,
const VkComponentMapping* swizzle /* = nullptr */)
{
const VkImageCreateInfo image_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
0,
VK_IMAGE_TYPE_2D,
format,
{width, height, 1},
levels,
layers,
samples,
tiling,
usage,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VmaAllocationCreateInfo aci = {};
aci.usage = VMA_MEMORY_USAGE_GPU_ONLY;
aci.flags = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
aci.requiredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
if (dedicated_memory)
aci.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
VkImage image = VK_NULL_HANDLE;
VmaAllocation allocation = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(g_vulkan_context->GetAllocator(), &image_info, &aci, &image, &allocation, nullptr);
if (res != VK_SUCCESS && dedicated_memory)
{
// try without dedicated memory
aci.flags &= ~VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
res = vmaCreateImage(g_vulkan_context->GetAllocator(), &image_info, &aci, &image, &allocation, nullptr);
}
if (res == VK_ERROR_OUT_OF_DEVICE_MEMORY)
{
Log_WarningPrintf("Failed to allocate device memory for %ux%u texture", width, height);
return false;
}
else if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vmaCreateImage failed: ");
return false;
}
const VkImageViewCreateInfo view_info = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
image,
view_type,
format,
swizzle ? *swizzle : s_identity_swizzle,
{Util::IsDepthFormat(format) ?
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) :
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
VkImageView view = VK_NULL_HANDLE;
res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
vmaDestroyImage(g_vulkan_context->GetAllocator(), image, allocation);
return false;
}
if (IsValid())
Destroy(true);
m_width = static_cast<u16>(width);
m_height = static_cast<u16>(height);
m_levels = static_cast<u8>(levels);
m_layers = static_cast<u8>(layers);
m_samples = static_cast<u8>(samples);
m_format = LookupBaseFormat(format);
m_view_type = view_type;
m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
m_image = image;
m_allocation = allocation;
m_view = view;
return true;
}
bool Vulkan::Texture::Adopt(VkImage existing_image, VkImageViewType view_type, u32 width, u32 height, u32 levels,
u32 layers, VkFormat format, VkSampleCountFlagBits samples, VkImageLayout layout,
const VkComponentMapping* swizzle /* = nullptr */)
{
// Only need to create the image view, this is mainly for swap chains.
const VkImageViewCreateInfo view_info = {VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
existing_image,
view_type,
format,
swizzle ? *swizzle : s_identity_swizzle,
{Util::IsDepthFormat(format) ?
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_DEPTH_BIT) :
static_cast<VkImageAspectFlags>(VK_IMAGE_ASPECT_COLOR_BIT),
0, levels, 0, layers}};
// Memory is managed by the owner of the image.
VkImageView view = VK_NULL_HANDLE;
VkResult res = vkCreateImageView(g_vulkan_context->GetDevice(), &view_info, nullptr, &view);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateImageView failed: ");
return false;
}
if (IsValid())
Destroy(true);
m_width = static_cast<u16>(width);
m_height = static_cast<u16>(height);
m_levels = static_cast<u8>(levels);
m_layers = static_cast<u8>(layers);
m_format = LookupBaseFormat(format);
m_samples = static_cast<u8>(samples);
m_view_type = view_type;
m_layout = layout;
m_image = existing_image;
m_view = view;
return true;
}
void Vulkan::Texture::Destroy(bool defer /* = true */)
{
if (m_view != VK_NULL_HANDLE)
{
if (defer)
g_vulkan_context->DeferImageViewDestruction(m_view);
else
vkDestroyImageView(g_vulkan_context->GetDevice(), m_view, nullptr);
m_view = VK_NULL_HANDLE;
}
// If we don't have device memory allocated, the image is not owned by us (e.g. swapchain)
if (m_allocation != VK_NULL_HANDLE)
{
Assert(m_image != VK_NULL_HANDLE);
if (defer)
g_vulkan_context->DeferImageDestruction(m_image, m_allocation);
else
vmaDestroyImage(g_vulkan_context->GetAllocator(), m_image, m_allocation);
m_image = VK_NULL_HANDLE;
m_allocation = VK_NULL_HANDLE;
}
ClearBaseProperties();
m_samples = VK_SAMPLE_COUNT_1_BIT;
m_view_type = VK_IMAGE_VIEW_TYPE_2D;
m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
}
void Vulkan::Texture::OverrideImageLayout(VkImageLayout new_layout)
{
m_layout = new_layout;
}
void Vulkan::Texture::TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout)
{
if (m_layout == new_layout)
return;
const Vulkan::Util::DebugScope debugScope(command_buffer, "Texture::TransitionToLayout: %s",
Vulkan::Util::VkImageLayoutToString(new_layout));
TransitionSubresourcesToLayout(command_buffer, 0, m_levels, 0, m_layers, m_layout, new_layout);
m_layout = new_layout;
}
void Vulkan::Texture::TransitionSubresourcesToLayout(VkCommandBuffer command_buffer, u32 start_level, u32 num_levels,
u32 start_layer, u32 num_layers, VkImageLayout old_layout,
VkImageLayout new_layout)
{
const Vulkan::Util::DebugScope debugScope(
command_buffer, "Texture::TransitionSubresourcesToLayout: Lvl:[%u,%u) Lyr:[%u,%u) %s -> %s", start_level,
start_level + num_levels, start_layer, start_layer + num_layers, Vulkan::Util::VkImageLayoutToString(old_layout),
Vulkan::Util::VkImageLayoutToString(new_layout));
VkImageMemoryBarrier barrier = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
0, // VkAccessFlags srcAccessMask
0, // VkAccessFlags dstAccessMask
old_layout, // VkImageLayout oldLayout
new_layout, // VkImageLayout newLayout
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
m_image, // VkImage image
{static_cast<VkImageAspectFlags>(IsDepthFormat(m_format) ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT),
start_level, num_levels, start_layer, num_layers} // VkImageSubresourceRange subresourceRange
};
// srcStageMask -> Stages that must complete before the barrier
// dstStageMask -> Stages that must wait for after the barrier before beginning
VkPipelineStageFlags srcStageMask, dstStageMask;
switch (old_layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
// Layout undefined therefore contents undefined, and we don't care what happens to it.
barrier.srcAccessMask = 0;
srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_PREINITIALIZED:
// Image has been pre-initialized by the host, so ensure all writes have completed.
barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_HOST_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image was being used as a color attachment, so ensure all writes have completed.
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image was being used as a depthstencil attachment, so ensure all writes have completed.
barrier.srcAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image was being used as a shader resource, make sure all reads have finished.
barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image was being used as a copy source, ensure all reads have finished.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image was being used as a copy destination, ensure all writes have finished.
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
srcStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
default:
srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
}
switch (new_layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
barrier.dstAccessMask = 0;
dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
barrier.dstAccessMask =
VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
break;
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
break;
default:
dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
break;
}
vkCmdPipelineBarrier(command_buffer, srcStageMask, dstStageMask, 0, 0, nullptr, 0, nullptr, 1, &barrier);
}
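// Illustrative sketch, not part of the original file: a typical call site for the transition helper
// above, making a render-target texture sampleable. The `tex` and `cmdbuf` names are assumptions for
// the example only.
static void ExampleTransitionForSampling(Vulkan::Texture& tex, VkCommandBuffer cmdbuf)
{
  // Color attachment writes must complete before fragment-shader reads begin; the switches above pick
  // VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT -> VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT for this pair.
  tex.TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}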
VkFramebuffer Vulkan::Texture::CreateFramebuffer(VkRenderPass render_pass)
{
const VkFramebufferCreateInfo ci = {
VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0u, render_pass, 1, &m_view, m_width, m_height, m_layers};
VkFramebuffer fb = VK_NULL_HANDLE;
VkResult res = vkCreateFramebuffer(g_vulkan_context->GetDevice(), &ci, nullptr, &fb);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateFramebuffer() failed: ");
return VK_NULL_HANDLE;
}
return fb;
}
void Vulkan::Texture::UpdateFromBuffer(VkCommandBuffer cmdbuf, u32 level, u32 layer, u32 x, u32 y, u32 width,
u32 height, VkBuffer buffer, u32 buffer_offset, u32 row_length)
{
// If the image was previously undefined, transition the whole image so no subresources are left in that layout.
const VkImageLayout old_layout = m_layout;
if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED)
TransitionToLayout(cmdbuf, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
else if (old_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
TransitionSubresourcesToLayout(cmdbuf, level, 1, layer, 1, old_layout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
const VkBufferImageCopy bic = {static_cast<VkDeviceSize>(buffer_offset),
row_length,
height,
{VK_IMAGE_ASPECT_COLOR_BIT, level, layer, 1u},
{static_cast<int32_t>(x), static_cast<int32_t>(y), 0},
{width, height, 1u}};
vkCmdCopyBufferToImage(cmdbuf, buffer, m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &bic);
if (old_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && old_layout != VK_IMAGE_LAYOUT_UNDEFINED)
TransitionSubresourcesToLayout(cmdbuf, level, 1, layer, 1, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, old_layout);
}
u32 Vulkan::Texture::CalcUpdatePitch(u32 width) const
{
return Common::AlignUp(width * GetPixelSize(), g_vulkan_context->GetBufferCopyRowPitchAlignment());
}
u32 Vulkan::Texture::CalcUpdateRowLength(u32 pitch) const
{
return pitch / GetPixelSize();
}
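// Illustrative worked example (assumed values, not from the original file): for a 250-pixel-wide
// RGBA8 texture with a 64-byte buffer-copy row-pitch alignment, CalcUpdatePitch(250) returns
// AlignUp(250 * 4, 64) = 1024 bytes, and CalcUpdateRowLength(1024) returns 1024 / 4 = 256 texels,
// which is the row length handed to vkCmdCopyBufferToImage() via UpdateFromBuffer().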
bool Vulkan::Texture::BeginUpdate(u32 width, u32 height, void** out_buffer, u32* out_pitch)
{
const u32 pitch = CalcUpdatePitch(width);
const u32 required_size = pitch * height;
StreamBuffer& buffer = g_vulkan_context->GetTextureUploadBuffer();
if (required_size > buffer.GetCurrentSize())
return false;
// TODO: allocate temporary buffer if this fails...
if (!buffer.ReserveMemory(required_size, g_vulkan_context->GetBufferCopyOffsetAlignment()))
{
g_vulkan_context->ExecuteCommandBuffer(false);
if (!buffer.ReserveMemory(required_size, g_vulkan_context->GetBufferCopyOffsetAlignment()))
return false;
}
*out_buffer = buffer.GetCurrentHostPointer();
*out_pitch = pitch;
return true;
}
void Vulkan::Texture::EndUpdate(u32 x, u32 y, u32 width, u32 height, u32 level, u32 layer)
{
const u32 pitch = CalcUpdatePitch(width);
const u32 required_size = pitch * height;
StreamBuffer& buffer = g_vulkan_context->GetTextureUploadBuffer();
const u32 buffer_offset = buffer.GetCurrentOffset();
buffer.CommitMemory(required_size);
UpdateFromBuffer(g_vulkan_context->GetCurrentCommandBuffer(), level, layer, x, y, width, height, buffer.GetBuffer(),
buffer_offset, CalcUpdateRowLength(pitch));
}
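// Illustrative sketch, not part of the original file: the intended BeginUpdate()/EndUpdate() pairing,
// writing rows into the mapped streaming buffer before committing the copy. `tex`, `pixels` and
// `pixels_pitch` are assumptions for the example only.
static void ExampleWholeTextureUpload(Vulkan::Texture& tex, const void* pixels, u32 pixels_pitch)
{
  void* map;
  u32 map_pitch;
  if (!tex.BeginUpdate(tex.GetWidth(), tex.GetHeight(), &map, &map_pitch))
    return;
  // Copy row-by-row, since the staging pitch is aligned and may differ from the source pitch.
  StringUtil::StrideMemCpy(map, map_pitch, pixels, pixels_pitch, std::min(pixels_pitch, map_pitch),
                           tex.GetHeight());
  tex.EndUpdate(0, 0, tex.GetWidth(), tex.GetHeight(), 0, 0);
}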
bool Vulkan::Texture::Update(u32 x, u32 y, u32 width, u32 height, u32 level, u32 layer, const void* data,
u32 data_pitch)
{
const u32 pitch = CalcUpdatePitch(width);
const u32 row_length = CalcUpdateRowLength(pitch);
const u32 required_size = pitch * height;
StreamBuffer& sbuffer = g_vulkan_context->GetTextureUploadBuffer();
// If the texture is larger than half of our streaming buffer size, use a separate buffer.
// Otherwise the allocation will either fail or require many command buffer submissions.
if (required_size > (g_vulkan_context->GetTextureUploadBuffer().GetCurrentSize() / 2))
{
const u32 size = data_pitch * height;
const VkBufferCreateInfo bci = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
nullptr,
0,
static_cast<VkDeviceSize>(size),
VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr};
// Don't worry about setting the coherent bit for this upload; the main reason we had it set in
// StreamBuffer was for MoltenVK, which would upload the whole buffer even for smaller uploads,
// but here we're writing to the whole buffer anyway.
VmaAllocationCreateInfo aci = {};
aci.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
aci.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
VmaAllocationInfo ai;
VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(g_vulkan_context->GetAllocator(), &bci, &aci, &buffer, &allocation, &ai);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vmaCreateBuffer() failed: ");
return false;
}
// Immediately queue it for freeing after the command buffer finishes, since it's only needed for the copy.
g_vulkan_context->DeferBufferDestruction(buffer, allocation);
StringUtil::StrideMemCpy(ai.pMappedData, pitch, data, data_pitch, std::min(data_pitch, pitch), height);
vmaFlushAllocation(g_vulkan_context->GetAllocator(), allocation, 0, size);
UpdateFromBuffer(g_vulkan_context->GetCurrentCommandBuffer(), level, layer, x, y, width, height, buffer, 0,
row_length);
return true;
}
else
{
if (!sbuffer.ReserveMemory(required_size, g_vulkan_context->GetBufferCopyOffsetAlignment()))
{
g_vulkan_context->ExecuteCommandBuffer(false);
if (!sbuffer.ReserveMemory(required_size, g_vulkan_context->GetBufferCopyOffsetAlignment()))
{
Log_ErrorPrintf("Failed to reserve texture upload memory (%u bytes).", required_size);
return false;
}
}
const u32 buffer_offset = sbuffer.GetCurrentOffset();
StringUtil::StrideMemCpy(sbuffer.GetCurrentHostPointer(), pitch, data, data_pitch, std::min(data_pitch, pitch),
height);
sbuffer.CommitMemory(required_size);
UpdateFromBuffer(g_vulkan_context->GetCurrentCommandBuffer(), level, layer, x, y, width, height,
sbuffer.GetBuffer(), buffer_offset, row_length);
return true;
}
}

View File

@ -1,83 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../gpu_texture.h"
#include "loader.h"
#include <algorithm>
#include <memory>
namespace Vulkan {
class Texture final : public GPUTexture
{
public:
Texture();
Texture(Texture&& move);
Texture(const Texture&) = delete;
~Texture();
Texture& operator=(Texture&& move);
Texture& operator=(const Texture&) = delete;
static VkFormat GetVkFormat(Format format);
static Format LookupBaseFormat(VkFormat vformat);
bool IsValid() const override;
/// An image is considered owned/managed if we control the memory.
ALWAYS_INLINE bool IsOwned() const { return (m_allocation != VK_NULL_HANDLE); }
ALWAYS_INLINE u32 GetWidth() const { return m_width; }
ALWAYS_INLINE u32 GetHeight() const { return m_height; }
ALWAYS_INLINE u32 GetLevels() const { return m_levels; }
ALWAYS_INLINE u32 GetLayers() const { return m_layers; }
ALWAYS_INLINE VkFormat GetVkFormat() const { return GetVkFormat(m_format); }
ALWAYS_INLINE VkSampleCountFlagBits GetVkSamples() const { return static_cast<VkSampleCountFlagBits>(m_samples); }
ALWAYS_INLINE VkImageLayout GetLayout() const { return m_layout; }
ALWAYS_INLINE VkImageViewType GetViewType() const { return m_view_type; }
ALWAYS_INLINE VkImage GetImage() const { return m_image; }
ALWAYS_INLINE VmaAllocation GetAllocation() const { return m_allocation; }
ALWAYS_INLINE VkImageView GetView() const { return m_view; }
bool Create(u32 width, u32 height, u32 levels, u32 layers, VkFormat format, VkSampleCountFlagBits samples,
VkImageViewType view_type, VkImageTiling tiling, VkImageUsageFlags usage, bool dedicated_memory = false,
const VkComponentMapping* swizzle = nullptr);
bool Adopt(VkImage existing_image, VkImageViewType view_type, u32 width, u32 height, u32 levels, u32 layers,
VkFormat format, VkSampleCountFlagBits samples, VkImageLayout layout,
const VkComponentMapping* swizzle = nullptr);
void Destroy(bool defer = true);
// Used when the render pass is changing the image layout, or to force it to
// VK_IMAGE_LAYOUT_UNDEFINED, if the existing contents of the image are
// irrelevant and will not be loaded.
void OverrideImageLayout(VkImageLayout new_layout);
void TransitionToLayout(VkCommandBuffer command_buffer, VkImageLayout new_layout);
void TransitionSubresourcesToLayout(VkCommandBuffer command_buffer, u32 start_level, u32 num_levels, u32 start_layer,
u32 num_layers, VkImageLayout old_layout, VkImageLayout new_layout);
VkFramebuffer CreateFramebuffer(VkRenderPass render_pass);
void UpdateFromBuffer(VkCommandBuffer cmdbuf, u32 level, u32 layer, u32 x, u32 y, u32 width, u32 height,
VkBuffer buffer, u32 buffer_offset, u32 row_length);
u32 CalcUpdatePitch(u32 width) const;
u32 CalcUpdateRowLength(u32 pitch) const;
bool BeginUpdate(u32 width, u32 height, void** out_buffer, u32* out_pitch);
void EndUpdate(u32 x, u32 y, u32 width, u32 height, u32 level, u32 layer);
bool Update(u32 x, u32 y, u32 width, u32 height, u32 level, u32 layer, const void* data, u32 data_pitch);
private:
VkImageViewType m_view_type = VK_IMAGE_VIEW_TYPE_2D;
VkImageLayout m_layout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImage m_image = VK_NULL_HANDLE;
VmaAllocation m_allocation = VK_NULL_HANDLE;
VkImageView m_view = VK_NULL_HANDLE;
};
} // namespace Vulkan

View File

@ -1,602 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "util.h"
#include "../assert.h"
#include "../log.h"
#include "../string_util.h"
#include "context.h"
#include "shader_compiler.h"
#include <cmath>
bool Vulkan::Util::IsDepthFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_D16_UNORM:
case VK_FORMAT_D16_UNORM_S8_UINT:
case VK_FORMAT_D24_UNORM_S8_UINT:
case VK_FORMAT_D32_SFLOAT:
case VK_FORMAT_D32_SFLOAT_S8_UINT:
return true;
default:
return false;
}
}
bool Vulkan::Util::IsCompressedFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
return true;
default:
return false;
}
}
VkFormat Vulkan::Util::GetLinearFormat(VkFormat format)
{
switch (format)
{
case VK_FORMAT_R8_SRGB:
return VK_FORMAT_R8_UNORM;
case VK_FORMAT_R8G8_SRGB:
return VK_FORMAT_R8G8_UNORM;
case VK_FORMAT_R8G8B8_SRGB:
return VK_FORMAT_R8G8B8_UNORM;
case VK_FORMAT_R8G8B8A8_SRGB:
return VK_FORMAT_R8G8B8A8_UNORM;
case VK_FORMAT_B8G8R8_SRGB:
return VK_FORMAT_B8G8R8_UNORM;
case VK_FORMAT_B8G8R8A8_SRGB:
return VK_FORMAT_B8G8R8A8_UNORM;
default:
return format;
}
}
u32 Vulkan::Util::GetTexelSize(VkFormat format)
{
// Only contains pixel formats we use.
switch (format)
{
case VK_FORMAT_R32_SFLOAT:
return 4;
case VK_FORMAT_D32_SFLOAT:
return 4;
case VK_FORMAT_R8G8B8A8_UNORM:
return 4;
case VK_FORMAT_B8G8R8A8_UNORM:
return 4;
case VK_FORMAT_R5G5B5A1_UNORM_PACK16:
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
case VK_FORMAT_R5G6B5_UNORM_PACK16:
case VK_FORMAT_B5G6R5_UNORM_PACK16:
return 2;
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
return 8;
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
return 16;
default:
Panic("Unhandled pixel format");
return 1;
}
}
u32 Vulkan::Util::GetBlockSize(VkFormat format)
{
switch (format)
{
case VK_FORMAT_BC1_RGBA_UNORM_BLOCK:
case VK_FORMAT_BC2_UNORM_BLOCK:
case VK_FORMAT_BC3_UNORM_BLOCK:
case VK_FORMAT_BC7_UNORM_BLOCK:
return 4;
default:
return 1;
}
}
VkRect2D Vulkan::Util::ClampRect2D(const VkRect2D& rect, u32 width, u32 height)
{
VkRect2D out;
out.offset.x = std::clamp(rect.offset.x, 0, static_cast<int>(width - 1));
out.offset.y = std::clamp(rect.offset.y, 0, static_cast<int>(height - 1));
out.extent.width = std::min(rect.extent.width, width - static_cast<int>(rect.offset.x));
out.extent.height = std::min(rect.extent.height, height - static_cast<int>(rect.offset.y));
return out;
}
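// Illustrative worked example (assumed values): clamping {offset = (600, 0), extent = 100x100} to a
// 640x480 target keeps the offset and shrinks the extent to 40x100, since only 40 pixels remain to
// the right of x = 600.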
VkBlendFactor Vulkan::Util::GetAlphaBlendFactor(VkBlendFactor factor)
{
switch (factor)
{
case VK_BLEND_FACTOR_SRC_COLOR:
return VK_BLEND_FACTOR_SRC_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
case VK_BLEND_FACTOR_DST_COLOR:
return VK_BLEND_FACTOR_DST_ALPHA;
case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
default:
return factor;
}
}
void Vulkan::Util::SetViewport(VkCommandBuffer command_buffer, int x, int y, int width, int height,
float min_depth /*= 0.0f*/, float max_depth /*= 1.0f*/)
{
const VkViewport vp{static_cast<float>(x),
static_cast<float>(y),
static_cast<float>(width),
static_cast<float>(height),
min_depth,
max_depth};
vkCmdSetViewport(command_buffer, 0, 1, &vp);
}
void Vulkan::Util::SetScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height)
{
const VkRect2D scissor{{x, y}, {static_cast<u32>(width), static_cast<u32>(height)}};
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
}
void Vulkan::Util::SetViewportAndScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height,
float min_depth /* = 0.0f */, float max_depth /* = 1.0f */)
{
const VkViewport vp{static_cast<float>(x),
static_cast<float>(y),
static_cast<float>(width),
static_cast<float>(height),
min_depth,
max_depth};
const VkRect2D scissor{{x, y}, {static_cast<u32>(width), static_cast<u32>(height)}};
vkCmdSetViewport(command_buffer, 0, 1, &vp);
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
}
void Vulkan::Util::SetViewportAndClampScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height,
float min_depth /*= 0.0f*/, float max_depth /*= 1.0f*/)
{
const VkViewport vp{static_cast<float>(x),
static_cast<float>(y),
static_cast<float>(width),
static_cast<float>(height),
min_depth,
max_depth};
vkCmdSetViewport(command_buffer, 0, 1, &vp);
const int cx = std::max(x, 0);
const int cy = std::max(y, 0);
const int cwidth = width - (cx - x);
const int cheight = height - (cy - y);
const VkRect2D scissor{{cx, cy}, {static_cast<u32>(cwidth), static_cast<u32>(cheight)}};
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
}
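// Illustrative worked example (assumed values): a viewport of x = -8, y = 0, 640x480 keeps the full
// (possibly negative-origin) viewport, while the scissor is clamped to cx = 0, cy = 0 and shrunk to
// 632x480 so it never starts outside the render target.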
void Vulkan::Util::SafeDestroyFramebuffer(VkFramebuffer& fb)
{
if (fb != VK_NULL_HANDLE)
{
vkDestroyFramebuffer(g_vulkan_context->GetDevice(), fb, nullptr);
fb = VK_NULL_HANDLE;
}
}
void Vulkan::Util::SafeDestroyShaderModule(VkShaderModule& sm)
{
if (sm != VK_NULL_HANDLE)
{
vkDestroyShaderModule(g_vulkan_context->GetDevice(), sm, nullptr);
sm = VK_NULL_HANDLE;
}
}
void Vulkan::Util::SafeDestroyPipeline(VkPipeline& p)
{
if (p != VK_NULL_HANDLE)
{
vkDestroyPipeline(g_vulkan_context->GetDevice(), p, nullptr);
p = VK_NULL_HANDLE;
}
}
void Vulkan::Util::SafeDestroyPipelineLayout(VkPipelineLayout& pl)
{
if (pl != VK_NULL_HANDLE)
{
vkDestroyPipelineLayout(g_vulkan_context->GetDevice(), pl, nullptr);
pl = VK_NULL_HANDLE;
}
}
void Vulkan::Util::SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl)
{
if (dsl != VK_NULL_HANDLE)
{
vkDestroyDescriptorSetLayout(g_vulkan_context->GetDevice(), dsl, nullptr);
dsl = VK_NULL_HANDLE;
}
}
void Vulkan::Util::SafeDestroyBufferView(VkBufferView& bv)
{
if (bv != VK_NULL_HANDLE)
{
vkDestroyBufferView(g_vulkan_context->GetDevice(), bv, nullptr);
bv = VK_NULL_HANDLE;
}
}
void Vulkan::Util::SafeDestroyImageView(VkImageView& iv)
{
if (iv != VK_NULL_HANDLE)
{
vkDestroyImageView(g_vulkan_context->GetDevice(), iv, nullptr);
iv = VK_NULL_HANDLE;
}
}
void Vulkan::Util::SafeDestroySampler(VkSampler& samp)
{
if (samp != VK_NULL_HANDLE)
{
vkDestroySampler(g_vulkan_context->GetDevice(), samp, nullptr);
samp = VK_NULL_HANDLE;
}
}
void Vulkan::Util::SafeDestroySemaphore(VkSemaphore& sem)
{
if (sem != VK_NULL_HANDLE)
{
vkDestroySemaphore(g_vulkan_context->GetDevice(), sem, nullptr);
sem = VK_NULL_HANDLE;
}
}
void Vulkan::Util::SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds)
{
if (ds != VK_NULL_HANDLE)
{
g_vulkan_context->FreeGlobalDescriptorSet(ds);
ds = VK_NULL_HANDLE;
}
}
void Vulkan::Util::BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask,
VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask)
{
VkBufferMemoryBarrier buffer_info = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType
nullptr, // const void* pNext
src_access_mask, // VkAccessFlags srcAccessMask
dst_access_mask, // VkAccessFlags dstAccessMask
VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
buffer, // VkBuffer buffer
offset, // VkDeviceSize offset
size // VkDeviceSize size
};
vkCmdPipelineBarrier(command_buffer, src_stage_mask, dst_stage_mask, 0, 0, nullptr, 1, &buffer_info, 0, nullptr);
}
void Vulkan::Util::AddPointerToChain(void* head, const void* ptr)
{
VkBaseInStructure* last_st = static_cast<VkBaseInStructure*>(head);
while (last_st->pNext)
{
if (last_st->pNext == ptr)
return;
last_st = const_cast<VkBaseInStructure*>(last_st->pNext);
}
last_st->pNext = static_cast<const VkBaseInStructure*>(ptr);
}
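// Illustrative sketch, not part of the original file: AddPointerToChain() is typically used to append
// an extension feature struct to a create-info pNext chain before device creation, e.g.
//   VkPhysicalDeviceShaderDrawParametersFeatures sdp = {
//     VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES, nullptr, VK_TRUE};
//   Vulkan::Util::AddPointerToChain(&device_create_info, &sdp);
// The helper walks the existing chain and links the struct at the end, skipping duplicates.
// The feature struct chosen here is only an example.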
VkShaderModule Vulkan::Util::CreateShaderModule(const u32* spv, size_t spv_word_count)
{
VkShaderModuleCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
info.codeSize = spv_word_count * sizeof(u32);
info.pCode = spv;
VkShaderModule module;
VkResult res = vkCreateShaderModule(g_vulkan_context->GetDevice(), &info, nullptr, &module);
if (res != VK_SUCCESS)
{
LOG_VULKAN_ERROR(res, "vkCreateShaderModule failed: ");
return VK_NULL_HANDLE;
}
return module;
}
VkShaderModule Vulkan::Util::CompileAndCreateVertexShader(std::string_view source_code)
{
std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileVertexShader(source_code);
if (!code)
return VK_NULL_HANDLE;
return CreateShaderModule(code->data(), code->size());
}
VkShaderModule Vulkan::Util::CompileAndCreateGeometryShader(std::string_view source_code)
{
std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileGeometryShader(source_code);
if (!code)
return VK_NULL_HANDLE;
return CreateShaderModule(code->data(), code->size());
}
VkShaderModule Vulkan::Util::CompileAndCreateFragmentShader(std::string_view source_code)
{
std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileFragmentShader(source_code);
if (!code)
return VK_NULL_HANDLE;
return CreateShaderModule(code->data(), code->size());
}
VkShaderModule Vulkan::Util::CompileAndCreateComputeShader(std::string_view source_code)
{
std::optional<ShaderCompiler::SPIRVCodeVector> code = ShaderCompiler::CompileComputeShader(source_code);
if (!code)
return VK_NULL_HANDLE;
return CreateShaderModule(code->data(), code->size());
}
const char* Vulkan::Util::VkResultToString(VkResult res)
{
switch (res)
{
case VK_SUCCESS:
return "VK_SUCCESS";
case VK_NOT_READY:
return "VK_NOT_READY";
case VK_TIMEOUT:
return "VK_TIMEOUT";
case VK_EVENT_SET:
return "VK_EVENT_SET";
case VK_EVENT_RESET:
return "VK_EVENT_RESET";
case VK_INCOMPLETE:
return "VK_INCOMPLETE";
case VK_ERROR_OUT_OF_HOST_MEMORY:
return "VK_ERROR_OUT_OF_HOST_MEMORY";
case VK_ERROR_OUT_OF_DEVICE_MEMORY:
return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
case VK_ERROR_INITIALIZATION_FAILED:
return "VK_ERROR_INITIALIZATION_FAILED";
case VK_ERROR_DEVICE_LOST:
return "VK_ERROR_DEVICE_LOST";
case VK_ERROR_MEMORY_MAP_FAILED:
return "VK_ERROR_MEMORY_MAP_FAILED";
case VK_ERROR_LAYER_NOT_PRESENT:
return "VK_ERROR_LAYER_NOT_PRESENT";
case VK_ERROR_EXTENSION_NOT_PRESENT:
return "VK_ERROR_EXTENSION_NOT_PRESENT";
case VK_ERROR_FEATURE_NOT_PRESENT:
return "VK_ERROR_FEATURE_NOT_PRESENT";
case VK_ERROR_INCOMPATIBLE_DRIVER:
return "VK_ERROR_INCOMPATIBLE_DRIVER";
case VK_ERROR_TOO_MANY_OBJECTS:
return "VK_ERROR_TOO_MANY_OBJECTS";
case VK_ERROR_FORMAT_NOT_SUPPORTED:
return "VK_ERROR_FORMAT_NOT_SUPPORTED";
case VK_ERROR_SURFACE_LOST_KHR:
return "VK_ERROR_SURFACE_LOST_KHR";
case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR:
return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
case VK_SUBOPTIMAL_KHR:
return "VK_SUBOPTIMAL_KHR";
case VK_ERROR_OUT_OF_DATE_KHR:
return "VK_ERROR_OUT_OF_DATE_KHR";
case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR:
return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
case VK_ERROR_VALIDATION_FAILED_EXT:
return "VK_ERROR_VALIDATION_FAILED_EXT";
case VK_ERROR_INVALID_SHADER_NV:
return "VK_ERROR_INVALID_SHADER_NV";
default:
return "UNKNOWN_VK_RESULT";
}
}
const char* Vulkan::Util::VkImageLayoutToString(VkImageLayout layout)
{
switch (layout)
{
case VK_IMAGE_LAYOUT_UNDEFINED:
return "VK_IMAGE_LAYOUT_UNDEFINED";
case VK_IMAGE_LAYOUT_GENERAL:
return "VK_IMAGE_LAYOUT_GENERAL";
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
return "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL";
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
return "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL";
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
return "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL";
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
return "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL";
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
return "VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL";
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
return "VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL";
case VK_IMAGE_LAYOUT_PREINITIALIZED:
return "VK_IMAGE_LAYOUT_PREINITIALIZED";
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
return "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL";
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
return "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL";
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
return "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL";
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
return "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL";
case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
return "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL";
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
return "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL";
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
return "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR";
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
return "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR";
case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV:
return "VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV";
case VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT:
return "VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT";
default:
return "UNKNOWN_VK_RESULT";
}
}
void Vulkan::Util::LogVulkanResult(int level, const char* func_name, VkResult res, const char* msg, ...)
{
std::va_list ap;
va_start(ap, msg);
std::string real_msg = StringUtil::StdStringFromFormatV(msg, ap);
va_end(ap);
Log::Writef("Vulkan", func_name, static_cast<LOGLEVEL>(level), "(%s) %s (%d: %s)", func_name, real_msg.c_str(),
static_cast<int>(res), VkResultToString(res));
}
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
u8 Vulkan::Util::DebugScope<VkCommandBuffer>::depth = 0;
u8 Vulkan::Util::DebugScope<VkQueue>::depth = 0;
static std::array<float, 4> Palette(float phase, const std::array<float, 3>& a, const std::array<float, 3>& b,
const std::array<float, 3>& c, const std::array<float, 3>& d)
{
std::array<float, 4> result;
result[0] = a[0] + b[0] * std::cos(6.28318f * (c[0] * phase + d[0]));
result[1] = a[1] + b[1] * std::cos(6.28318f * (c[1] * phase + d[1]));
result[2] = a[2] + b[2] * std::cos(6.28318f * (c[2] * phase + d[2]));
result[3] = 1.0f;
return result;
}
Vulkan::Util::DebugScope<VkCommandBuffer>::DebugScope(VkCommandBuffer context, const char* format, ...)
: command_buffer(context)
{
if (command_buffer)
{
std::va_list ap;
SmallString str;
va_start(ap, format);
str.FormatVA(format, ap);
va_end(ap);
++depth;
const float depth_phase = depth / static_cast<float>(max_depth);
BeginDebugScope(
command_buffer, str,
Palette(depth_phase, {0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}, {1.0f, 1.0f, 0.5f}, {0.8f, 0.90f, 0.30f}));
}
}
Vulkan::Util::DebugScope<VkCommandBuffer>::~DebugScope()
{
if (command_buffer)
{
--depth;
EndDebugScope(command_buffer);
}
}
Vulkan::Util::DebugScope<VkQueue>::DebugScope(VkQueue context, const char* format, ...) : queue(context)
{
if (queue)
{
std::va_list ap;
va_start(ap, format);
SmallString str;
str.FormatVA(format, ap);
va_end(ap);
const float depth_phase = depth / static_cast<float>(max_depth);
BeginDebugScope(
queue, str,
Palette(depth_phase, {0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f}, {2.0f, 1.0f, 0.0f}, {0.5f, 0.20f, 0.25f}));
++depth;
}
}
Vulkan::Util::DebugScope<VkQueue>::~DebugScope()
{
if (queue)
{
--depth;
EndDebugScope(queue);
}
}
#endif

View File

@ -1,306 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "../string.h"
#include "../types.h"
#include "context.h"
#include "loader.h"
#include <algorithm>
#include <array>
#include <cstdarg>
#include <string_view>
namespace Vulkan::Util {
inline constexpr u32 MakeRGBA8Color(float r, float g, float b, float a)
{
return (static_cast<u32>(std::clamp(static_cast<int>(r * 255.0f), 0, 255)) << 0) |
(static_cast<u32>(std::clamp(static_cast<int>(g * 255.0f), 0, 255)) << 8) |
(static_cast<u32>(std::clamp(static_cast<int>(b * 255.0f), 0, 255)) << 16) |
(static_cast<u32>(std::clamp(static_cast<int>(a * 255.0f), 0, 255)) << 24);
}
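// Illustrative worked example (assumed values): MakeRGBA8Color(1.0f, 0.0f, 0.0f, 1.0f) packs to
// 0xFF0000FFu, i.e. red in the low byte and alpha in the high byte.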
bool IsDepthFormat(VkFormat format);
bool IsCompressedFormat(VkFormat format);
VkFormat GetLinearFormat(VkFormat format);
u32 GetTexelSize(VkFormat format);
u32 GetBlockSize(VkFormat format);
// Clamps a VkRect2D to the specified dimensions.
VkRect2D ClampRect2D(const VkRect2D& rect, u32 width, u32 height);
// Map {SRC,DST}_COLOR to {SRC,DST}_ALPHA
VkBlendFactor GetAlphaBlendFactor(VkBlendFactor factor);
// Safe destroy helpers
void SafeDestroyFramebuffer(VkFramebuffer& fb);
void SafeDestroyShaderModule(VkShaderModule& sm);
void SafeDestroyPipeline(VkPipeline& p);
void SafeDestroyPipelineLayout(VkPipelineLayout& pl);
void SafeDestroyDescriptorSetLayout(VkDescriptorSetLayout& dsl);
void SafeDestroyBufferView(VkBufferView& bv);
void SafeDestroyImageView(VkImageView& iv);
void SafeDestroySampler(VkSampler& samp);
void SafeDestroySemaphore(VkSemaphore& sem);
void SafeFreeGlobalDescriptorSet(VkDescriptorSet& ds);
void SetViewport(VkCommandBuffer command_buffer, int x, int y, int width, int height, float min_depth = 0.0f,
float max_depth = 1.0f);
void SetScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height);
// Combines viewport and scissor updates
void SetViewportAndScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height, float min_depth = 0.0f,
float max_depth = 1.0f);
void SetViewportAndClampScissor(VkCommandBuffer command_buffer, int x, int y, int width, int height,
float min_depth = 0.0f, float max_depth = 1.0f);
// Wrapper for creating a barrier on a buffer
void BufferMemoryBarrier(VkCommandBuffer command_buffer, VkBuffer buffer, VkAccessFlags src_access_mask,
VkAccessFlags dst_access_mask, VkDeviceSize offset, VkDeviceSize size,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask);
// Adds a structure to a chain.
void AddPointerToChain(void* head, const void* ptr);
// Create a shader module from the specified SPIR-V.
VkShaderModule CreateShaderModule(const u32* spv, size_t spv_word_count);
// Compile a vertex shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateVertexShader(std::string_view source_code);
// Compile a geometry shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateGeometryShader(std::string_view source_code);
// Compile a fragment shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateFragmentShader(std::string_view source_code);
// Compile a compute shader and create a shader module, discarding the intermediate SPIR-V.
VkShaderModule CompileAndCreateComputeShader(std::string_view source_code);
const char* VkResultToString(VkResult res);
const char* VkImageLayoutToString(VkImageLayout layout);
void LogVulkanResult(int level, const char* func_name, VkResult res, const char* msg, ...) printflike(4, 5);
#define LOG_VULKAN_ERROR(res, ...) ::Vulkan::Util::LogVulkanResult(1, __func__, res, __VA_ARGS__)
#if defined(_DEBUG)
// We can't use the templates below because they're all the same type on 32-bit.
#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || \
defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
#define ENABLE_VULKAN_DEBUG_OBJECTS 1
#endif
#endif
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
// Provides a compile-time mapping from a Vulkan handle type to its matching VkObjectType
template<typename T>
struct VkObjectTypeMap;
// clang-format off
template<> struct VkObjectTypeMap<VkInstance > { using type = VkInstance ; static constexpr VkObjectType value = VK_OBJECT_TYPE_INSTANCE; };
template<> struct VkObjectTypeMap<VkPhysicalDevice > { using type = VkPhysicalDevice ; static constexpr VkObjectType value = VK_OBJECT_TYPE_PHYSICAL_DEVICE; };
template<> struct VkObjectTypeMap<VkDevice > { using type = VkDevice ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEVICE; };
template<> struct VkObjectTypeMap<VkQueue > { using type = VkQueue ; static constexpr VkObjectType value = VK_OBJECT_TYPE_QUEUE; };
template<> struct VkObjectTypeMap<VkSemaphore > { using type = VkSemaphore ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SEMAPHORE; };
template<> struct VkObjectTypeMap<VkCommandBuffer > { using type = VkCommandBuffer ; static constexpr VkObjectType value = VK_OBJECT_TYPE_COMMAND_BUFFER; };
template<> struct VkObjectTypeMap<VkFence > { using type = VkFence ; static constexpr VkObjectType value = VK_OBJECT_TYPE_FENCE; };
template<> struct VkObjectTypeMap<VkDeviceMemory > { using type = VkDeviceMemory ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEVICE_MEMORY; };
template<> struct VkObjectTypeMap<VkBuffer > { using type = VkBuffer ; static constexpr VkObjectType value = VK_OBJECT_TYPE_BUFFER; };
template<> struct VkObjectTypeMap<VkImage > { using type = VkImage ; static constexpr VkObjectType value = VK_OBJECT_TYPE_IMAGE; };
template<> struct VkObjectTypeMap<VkEvent > { using type = VkEvent ; static constexpr VkObjectType value = VK_OBJECT_TYPE_EVENT; };
template<> struct VkObjectTypeMap<VkQueryPool > { using type = VkQueryPool ; static constexpr VkObjectType value = VK_OBJECT_TYPE_QUERY_POOL; };
template<> struct VkObjectTypeMap<VkBufferView > { using type = VkBufferView ; static constexpr VkObjectType value = VK_OBJECT_TYPE_BUFFER_VIEW; };
template<> struct VkObjectTypeMap<VkImageView > { using type = VkImageView ; static constexpr VkObjectType value = VK_OBJECT_TYPE_IMAGE_VIEW; };
template<> struct VkObjectTypeMap<VkShaderModule > { using type = VkShaderModule ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SHADER_MODULE; };
template<> struct VkObjectTypeMap<VkPipelineCache > { using type = VkPipelineCache ; static constexpr VkObjectType value = VK_OBJECT_TYPE_PIPELINE_CACHE; };
template<> struct VkObjectTypeMap<VkPipelineLayout > { using type = VkPipelineLayout ; static constexpr VkObjectType value = VK_OBJECT_TYPE_PIPELINE_LAYOUT; };
template<> struct VkObjectTypeMap<VkRenderPass > { using type = VkRenderPass ; static constexpr VkObjectType value = VK_OBJECT_TYPE_RENDER_PASS; };
template<> struct VkObjectTypeMap<VkPipeline > { using type = VkPipeline ; static constexpr VkObjectType value = VK_OBJECT_TYPE_PIPELINE; };
template<> struct VkObjectTypeMap<VkDescriptorSetLayout > { using type = VkDescriptorSetLayout ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT; };
template<> struct VkObjectTypeMap<VkSampler > { using type = VkSampler ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SAMPLER; };
template<> struct VkObjectTypeMap<VkDescriptorPool > { using type = VkDescriptorPool ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_POOL; };
template<> struct VkObjectTypeMap<VkDescriptorSet > { using type = VkDescriptorSet ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_SET; };
template<> struct VkObjectTypeMap<VkFramebuffer > { using type = VkFramebuffer ; static constexpr VkObjectType value = VK_OBJECT_TYPE_FRAMEBUFFER; };
template<> struct VkObjectTypeMap<VkCommandPool > { using type = VkCommandPool ; static constexpr VkObjectType value = VK_OBJECT_TYPE_COMMAND_POOL; };
template<> struct VkObjectTypeMap<VkDescriptorUpdateTemplate> { using type = VkDescriptorUpdateTemplate; static constexpr VkObjectType value = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE; };
template<> struct VkObjectTypeMap<VkSurfaceKHR > { using type = VkSurfaceKHR ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SURFACE_KHR; };
template<> struct VkObjectTypeMap<VkSwapchainKHR > { using type = VkSwapchainKHR ; static constexpr VkObjectType value = VK_OBJECT_TYPE_SWAPCHAIN_KHR; };
template<> struct VkObjectTypeMap<VkDebugUtilsMessengerEXT > { using type = VkDebugUtilsMessengerEXT ; static constexpr VkObjectType value = VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT; };
// clang-format on
#endif
inline void SetObjectName(VkDevice device, void* object_handle, VkObjectType object_type, const char* format, ...)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
if (!vkSetDebugUtilsObjectNameEXT)
{
return;
}
std::va_list ap;
SmallString str;
va_start(ap, format);
str.FormatVA(format, ap);
va_end(ap);
const VkDebugUtilsObjectNameInfoEXT nameInfo{VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT, nullptr, object_type,
reinterpret_cast<uint64_t>(object_handle), str};
vkSetDebugUtilsObjectNameEXT(device, &nameInfo);
#endif
}
template<typename T>
inline void SetObjectName(VkDevice device, T object_handle, const char* format, ...)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
std::va_list ap;
va_start(ap, format);
SetObjectName(device, reinterpret_cast<void*>((typename VkObjectTypeMap<T>::type)object_handle),
VkObjectTypeMap<T>::value, format, ap);
va_end(ap);
#endif
}
template<>
inline void SetObjectName(VkDevice device, VmaAllocation object_handle, const char* format, ...)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
std::va_list ap;
SmallString str;
va_start(ap, format);
str.FormatVA(format, ap);
va_end(ap);
vmaSetAllocationName(g_vulkan_context->GetAllocator(), object_handle, str);
#endif
}
// Command buffer debug utils
inline void BeginDebugScope(VkCommandBuffer command_buffer, const char* scope_name,
const std::array<float, 4>& scope_color = {0.5, 0.5, 0.5, 1.0})
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
if (!vkCmdBeginDebugUtilsLabelEXT)
{
return;
}
const VkDebugUtilsLabelEXT label{VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,
nullptr,
scope_name,
{scope_color[0], scope_color[1], scope_color[2], scope_color[3]}};
vkCmdBeginDebugUtilsLabelEXT(command_buffer, &label);
#endif
}
inline void EndDebugScope(VkCommandBuffer command_buffer)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
if (!vkCmdEndDebugUtilsLabelEXT)
{
return;
}
vkCmdEndDebugUtilsLabelEXT(command_buffer);
#endif
}
inline void InsertDebugLabel(VkCommandBuffer command_buffer, const char* label_name,
const std::array<float, 4>& label_color = {0.5, 0.5, 0.5, 1.0})
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
if (!vkCmdInsertDebugUtilsLabelEXT)
{
return;
}
const VkDebugUtilsLabelEXT label{VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,
nullptr,
label_name,
{label_color[0], label_color[1], label_color[2], label_color[3]}};
vkCmdInsertDebugUtilsLabelEXT(command_buffer, &label);
#endif
}
// Queue debug utils
inline void BeginDebugScope(VkQueue queue, const char* scope_name,
const std::array<float, 4>& scope_color = {0.75, 0.75, 0.75, 1.0})
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
if (!vkQueueBeginDebugUtilsLabelEXT)
{
return;
}
const VkDebugUtilsLabelEXT label{VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,
nullptr,
scope_name,
{scope_color[0], scope_color[1], scope_color[2], scope_color[3]}};
vkQueueBeginDebugUtilsLabelEXT(queue, &label);
#endif
}
inline void EndDebugScope(VkQueue queue)
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
if (!vkQueueEndDebugUtilsLabelEXT)
{
return;
}
vkQueueEndDebugUtilsLabelEXT(queue);
#endif
}
inline void InsertDebugLabel(VkQueue queue, const char* label_name,
const std::array<float, 4>& label_color = {0.75, 0.75, 0.75, 1.0})
{
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
if (!vkQueueInsertDebugUtilsLabelEXT)
{
return;
}
const VkDebugUtilsLabelEXT label{VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,
nullptr,
label_name,
{label_color[0], label_color[1], label_color[2], label_color[3]}};
vkQueueInsertDebugUtilsLabelEXT(queue, &label);
#endif
}
template<typename T>
class DebugScope
{
public:
DebugScope(T context, const char* format, ...) {}
};
#ifdef ENABLE_VULKAN_DEBUG_OBJECTS
template<>
class DebugScope<VkCommandBuffer>
{
public:
DebugScope(VkCommandBuffer context, const char* format, ...);
~DebugScope();
private:
static constexpr u8 max_depth = 8u;
static u8 depth;
VkCommandBuffer command_buffer;
};
template<>
class DebugScope<VkQueue>
{
public:
DebugScope(VkQueue context, const char* format, ...);
~DebugScope();
private:
static constexpr u8 max_depth = 8u;
static u8 depth;
VkQueue queue;
};
#endif
} // namespace Vulkan::Util
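// Illustrative usage sketch, not part of the original header, showing how the debug helpers above are
// typically driven from a renderer (assuming `cmdbuf` is the active command buffer and `tex` an owned
// texture):
//   Vulkan::Util::SetObjectName(g_vulkan_context->GetDevice(), tex.GetImage(), "VRAM texture");
//   Vulkan::Util::DebugScope<VkCommandBuffer> scope(cmdbuf, "Blit %ux%u", width, height);
//   Vulkan::Util::InsertDebugLabel(cmdbuf, "Blit done");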

View File

@ -4,7 +4,6 @@
#include "win32_progress_callback.h"
#include "common/log.h"
#include <CommCtrl.h>
#pragma comment(lib, "Comctl32.lib")
Log_SetChannel(Win32ProgressCallback);
Win32ProgressCallback::Win32ProgressCallback() : BaseProgressCallback()

View File

@ -125,35 +125,6 @@ target_include_directories(core PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/..")
target_link_libraries(core PUBLIC Threads::Threads common util zlib)
target_link_libraries(core PRIVATE stb xxhash imgui rapidjson)
if(WIN32)
target_sources(core PRIVATE
gpu_hw_d3d12.cpp
gpu_hw_d3d12.h
gpu_hw_d3d11.cpp
gpu_hw_d3d11.h
)
target_link_libraries(core PRIVATE winmm.lib)
endif()
if(ENABLE_CUBEB)
target_compile_definitions(core PUBLIC "WITH_CUBEB=1")
endif()
if(ENABLE_OPENGL)
target_sources(core PRIVATE
gpu_hw_opengl.cpp
gpu_hw_opengl.h
)
target_link_libraries(core PRIVATE glad)
endif()
if(ENABLE_VULKAN)
target_sources(core PRIVATE
gpu_hw_vulkan.cpp
gpu_hw_vulkan.h
)
endif()
if(${CPU_ARCH} STREQUAL "x64")
target_include_directories(core PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/../../dep/xbyak/xbyak")
target_compile_definitions(core PUBLIC "WITH_RECOMPILER=1" "WITH_MMAP_FASTMEM=1")

View File

@ -19,6 +19,7 @@
#include "resources.h"
#include "save_state_version.h"
#include "settings.h"
#include "shader_cache_version.h"
#include "spu.h"
#include "system.h"
#include "texture_replacements.h"
@ -27,7 +28,7 @@
#include "scmversion/scmversion.h"
#include "util/audio_stream.h"
#include "util/host_display.h"
#include "util/gpu_device.h"
#include "util/imgui_fullscreen.h"
#include "util/imgui_manager.h"
#include "util/ini_settings_interface.h"
@ -60,21 +61,11 @@
#ifdef _WIN32
#include "common/windows_headers.h"
#include "util/d3d11_host_display.h"
#include "util/d3d12_host_display.h"
#include <KnownFolders.h>
#include <ShlObj.h>
#include <mmsystem.h>
#endif
#ifdef WITH_OPENGL
#include "util/opengl_host_display.h"
#endif
#ifdef WITH_VULKAN
#include "util/vulkan_host_display.h"
#endif
Log_SetChannel(CommonHostInterface);
namespace CommonHost {
@ -144,52 +135,89 @@ void CommonHost::PumpMessagesOnCPUThread()
#endif
}
std::unique_ptr<HostDisplay> Host::CreateDisplayForAPI(RenderAPI api)
bool Host::CreateGPUDevice(RenderAPI api)
{
switch (api)
DebugAssert(!g_gpu_device);
Log_InfoPrintf("Trying to create a %s GPU device...", GPUDevice::RenderAPIToString(api));
g_gpu_device = GPUDevice::CreateDeviceForAPI(api);
// TODO: FSUI should always use vsync.
const bool vsync = System::IsValid() ? System::ShouldUseVSync() : g_settings.video_sync_enabled;
if (!g_gpu_device || !g_gpu_device->Create(g_settings.gpu_adapter,
g_settings.gpu_disable_shader_cache ? std::string_view() :
std::string_view(EmuFolders::Cache),
SHADER_CACHE_VERSION, g_settings.gpu_use_debug_device, vsync,
g_settings.gpu_threaded_presentation))
{
#ifdef WITH_VULKAN
case RenderAPI::Vulkan:
return std::make_unique<VulkanHostDisplay>();
#endif
#ifdef WITH_OPENGL
case RenderAPI::OpenGL:
case RenderAPI::OpenGLES:
return std::make_unique<OpenGLHostDisplay>();
#endif
#ifdef _WIN32
case RenderAPI::D3D12:
return std::make_unique<D3D12HostDisplay>();
case RenderAPI::D3D11:
return std::make_unique<D3D11HostDisplay>();
#endif
default:
#if defined(_WIN32) && defined(_M_ARM64)
return std::make_unique<D3D12HostDisplay>();
#elif defined(_WIN32)
return std::make_unique<D3D11HostDisplay>();
#elif defined(WITH_OPENGL)
return std::make_unique<OpenGLHostDisplay>();
#elif defined(WITH_VULKAN)
return std::make_unique<VulkanHostDisplay>();
#else
return {};
#endif
Log_ErrorPrintf("Failed to initialize GPU device.");
if (g_gpu_device)
g_gpu_device->Destroy();
g_gpu_device.reset();
return false;
}
if (!ImGuiManager::Initialize())
{
Log_ErrorPrintf("Failed to initialize ImGuiManager.");
g_gpu_device->Destroy();
g_gpu_device.reset();
return false;
}
}
bool CommonHost::CreateHostDisplayResources()
{
return true;
}
void CommonHost::ReleaseHostDisplayResources()
void Host::UpdateDisplayWindow()
{
if (!g_gpu_device)
return;
if (!g_gpu_device->UpdateWindow())
{
Host::ReportErrorAsync("Error", "Failed to change window after update. The log may contain more information.");
return;
}
ImGuiManager::WindowResized();
// If we're paused, re-present the current frame at the new window size.
if (System::IsValid() && System::IsPaused())
RenderDisplay(false);
}
void Host::ResizeDisplayWindow(s32 width, s32 height, float scale)
{
if (!g_gpu_device)
return;
Log_DevPrintf("Display window resized to %dx%d", width, height);
g_gpu_device->ResizeWindow(width, height, scale);
ImGuiManager::WindowResized();
// If we're paused, re-present the current frame at the new window size.
if (System::IsValid())
{
if (System::IsPaused())
RenderDisplay(false);
System::HostDisplayResized();
}
}
void Host::ReleaseGPUDevice()
{
if (!g_gpu_device)
return;
SaveStateSelectorUI::DestroyTextures();
FullscreenUI::Shutdown();
ImGuiManager::Shutdown();
Log_InfoPrintf("Destroying %s GPU device...", GPUDevice::RenderAPIToString(g_gpu_device->GetRenderAPI()));
g_gpu_device->Destroy();
g_gpu_device.reset();
}
#ifndef __ANDROID__
@ -458,7 +486,10 @@ void Host::DisplayLoadingScreen(const char* message, int progress_min /*= -1*/,
}
ImGui::End();
ImGui::SetNextWindowSize(ImVec2(width, (has_progress ? 50.0f : 30.0f) * scale), ImGuiCond_Always);
const float padding_and_rounding = 15.0f * scale;
ImGui::PushStyleVar(ImGuiStyleVar_WindowRounding, padding_and_rounding);
ImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(padding_and_rounding, padding_and_rounding));
ImGui::SetNextWindowSize(ImVec2(width, (has_progress ? 80.0f : 50.0f) * scale), ImGuiCond_Always);
ImGui::SetNextWindowPos(ImVec2(io.DisplaySize.x * 0.5f, (io.DisplaySize.y * 0.5f) + (100.0f * scale)),
ImGuiCond_Always, ImVec2(0.5f, 0.0f));
if (ImGui::Begin("LoadingScreen", nullptr,
@ -468,7 +499,17 @@ void Host::DisplayLoadingScreen(const char* message, int progress_min /*= -1*/,
{
if (has_progress)
{
ImGui::Text("%s: %d/%d", message, progress_value, progress_max);
ImGui::TextUnformatted(message);
TinyString buf;
buf.Fmt("{}/{}", progress_value, progress_max);
const ImVec2 prog_size = ImGui::CalcTextSize(buf.GetCharArray(), buf.GetCharArray() + buf.GetLength());
ImGui::SameLine();
ImGui::SetCursorPosX(width - padding_and_rounding - prog_size.x);
ImGui::TextUnformatted(buf.GetCharArray(), buf.GetCharArray() + buf.GetLength());
ImGui::SetCursorPosY(ImGui::GetCursorPosY() + 5.0f);
ImGui::ProgressBar(static_cast<float>(progress_value) / static_cast<float>(progress_max - progress_min),
ImVec2(-1.0f, 0.0f), "");
Log_InfoPrintf("%s: %d/%d", message, progress_value, progress_max);
@ -482,9 +523,10 @@ void Host::DisplayLoadingScreen(const char* message, int progress_min /*= -1*/,
}
}
ImGui::End();
ImGui::PopStyleVar(2);
ImGui::EndFrame();
g_host_display->Render(false);
g_gpu_device->Render(false);
ImGui::NewFrame();
}
@ -628,7 +670,6 @@ static void HotkeyModifyResolutionScale(s32 increment)
{
g_gpu->RestoreGraphicsAPIState();
g_gpu->UpdateSettings();
g_gpu->ResetGraphicsAPIState();
System::ClearMemorySaveStates();
Host::InvalidateDisplay();
}
@ -888,7 +929,6 @@ DEFINE_HOTKEY("TogglePGXP", TRANSLATE_NOOP("Hotkeys", "Graphics"), TRANSLATE_NOO
g_settings.gpu_pgxp_enable = !g_settings.gpu_pgxp_enable;
g_gpu->RestoreGraphicsAPIState();
g_gpu->UpdateSettings();
g_gpu->ResetGraphicsAPIState();
System::ClearMemorySaveStates();
Host::AddKeyedOSDMessage("TogglePGXP",
g_settings.gpu_pgxp_enable ?
@ -957,7 +997,6 @@ DEFINE_HOTKEY("TogglePGXPDepth", TRANSLATE_NOOP("Hotkeys", "Graphics"),
g_gpu->RestoreGraphicsAPIState();
g_gpu->UpdateSettings();
g_gpu->ResetGraphicsAPIState();
System::ClearMemorySaveStates();
Host::AddKeyedOSDMessage("TogglePGXPDepth",
g_settings.gpu_pgxp_depth_buffer ?
@ -977,7 +1016,6 @@ DEFINE_HOTKEY("TogglePGXPCPU", TRANSLATE_NOOP("Hotkeys", "Graphics"), TRANSLATE_
g_gpu->RestoreGraphicsAPIState();
g_gpu->UpdateSettings();
g_gpu->ResetGraphicsAPIState();
System::ClearMemorySaveStates();
Host::AddKeyedOSDMessage("TogglePGXPCPU",
g_settings.gpu_pgxp_cpu ?

View File

@ -35,8 +35,6 @@ void OnSystemPaused();
void OnSystemResumed();
void OnGameChanged(const std::string& disc_path, const std::string& game_serial, const std::string& game_name);
void PumpMessagesOnCPUThread();
bool CreateHostDisplayResources();
void ReleaseHostDisplayResources();
/// Returns the time elapsed in the current play session.
u64 GetSessionPlayedTime();

View File

@ -39,12 +39,7 @@
<ClCompile Include="game_list.cpp" />
<ClCompile Include="gpu_backend.cpp" />
<ClCompile Include="gpu_commands.cpp" />
<ClCompile Include="gpu_hw_d3d11.cpp" />
<ClCompile Include="gpu_hw_d3d12.cpp" />
<ClCompile Include="gpu_hw_shadergen.cpp" />
<ClCompile Include="gpu_hw_vulkan.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="gpu_sw.cpp" />
<ClCompile Include="gpu_sw_backend.cpp" />
<ClCompile Include="gte.cpp" />
@ -52,9 +47,6 @@
<ClCompile Include="gdb_protocol.cpp" />
<ClCompile Include="gpu.cpp" />
<ClCompile Include="gpu_hw.cpp" />
<ClCompile Include="gpu_hw_opengl.cpp">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClCompile>
<ClCompile Include="host.cpp" />
<ClCompile Include="host_interface_progress_callback.cpp" />
<ClCompile Include="host_settings.cpp" />
@ -113,12 +105,7 @@
<ClInclude Include="game_database.h" />
<ClInclude Include="game_list.h" />
<ClInclude Include="gpu_backend.h" />
<ClInclude Include="gpu_hw_d3d11.h" />
<ClInclude Include="gpu_hw_d3d12.h" />
<ClInclude Include="gpu_hw_shadergen.h" />
<ClInclude Include="gpu_hw_vulkan.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="gpu_sw.h" />
<ClInclude Include="gpu_sw_backend.h" />
<ClInclude Include="gpu_types.h" />
@ -128,9 +115,6 @@
<ClInclude Include="gdb_protocol.h" />
<ClInclude Include="gpu.h" />
<ClInclude Include="gpu_hw.h" />
<ClInclude Include="gpu_hw_opengl.h">
<ExcludedFromBuild Condition="'$(Platform)'=='ARM64'">true</ExcludedFromBuild>
</ClInclude>
<ClInclude Include="gte_types.h" />
<ClInclude Include="host.h" />
<ClInclude Include="host_interface_progress_callback.h" />

View File

@ -8,7 +8,6 @@
<ClCompile Include="dma.cpp" />
<ClCompile Include="gdb_protocol.cpp" />
<ClCompile Include="gpu.cpp" />
<ClCompile Include="gpu_hw_opengl.cpp" />
<ClCompile Include="gpu_hw.cpp" />
<ClCompile Include="interrupt_controller.cpp" />
<ClCompile Include="cdrom.cpp" />
@ -23,7 +22,6 @@
<ClCompile Include="gpu_commands.cpp" />
<ClCompile Include="gpu_sw.cpp" />
<ClCompile Include="gpu_hw_shadergen.cpp" />
<ClCompile Include="gpu_hw_d3d11.cpp" />
<ClCompile Include="bios.cpp" />
<ClCompile Include="cpu_code_cache.cpp" />
<ClCompile Include="cpu_recompiler_register_cache.cpp" />
@ -41,7 +39,6 @@
<ClCompile Include="guncon.cpp" />
<ClCompile Include="playstation_mouse.cpp" />
<ClCompile Include="negcon.cpp" />
<ClCompile Include="gpu_hw_vulkan.cpp" />
<ClCompile Include="resources.cpp" />
<ClCompile Include="host_interface_progress_callback.cpp" />
<ClCompile Include="pgxp.cpp" />
@ -53,7 +50,6 @@
<ClCompile Include="gpu_sw_backend.cpp" />
<ClCompile Include="texture_replacements.cpp" />
<ClCompile Include="multitap.cpp" />
<ClCompile Include="gpu_hw_d3d12.cpp" />
<ClCompile Include="host.cpp" />
<ClCompile Include="game_database.cpp" />
<ClCompile Include="pcdrv.cpp" />
@ -74,7 +70,6 @@
<ClInclude Include="bus.h" />
<ClInclude Include="dma.h" />
<ClInclude Include="gpu.h" />
<ClInclude Include="gpu_hw_opengl.h" />
<ClInclude Include="gpu_hw.h" />
<ClInclude Include="interrupt_controller.h" />
<ClInclude Include="cdrom.h" />
@ -88,7 +83,6 @@
<ClInclude Include="settings.h" />
<ClInclude Include="gpu_sw.h" />
<ClInclude Include="gpu_hw_shadergen.h" />
<ClInclude Include="gpu_hw_d3d11.h" />
<ClInclude Include="bios.h" />
<ClInclude Include="cpu_recompiler_types.h" />
<ClInclude Include="cpu_code_cache.h" />
@ -104,7 +98,6 @@
<ClInclude Include="guncon.h" />
<ClInclude Include="playstation_mouse.h" />
<ClInclude Include="negcon.h" />
<ClInclude Include="gpu_hw_vulkan.h" />
<ClInclude Include="resources.h" />
<ClInclude Include="host_interface_progress_callback.h" />
<ClInclude Include="gte_types.h" />
@ -117,9 +110,7 @@
<ClInclude Include="gpu_backend.h" />
<ClInclude Include="gpu_sw_backend.h" />
<ClInclude Include="texture_replacements.h" />
<ClInclude Include="shader_cache_version.h" />
<ClInclude Include="multitap.h" />
<ClInclude Include="gpu_hw_d3d12.h" />
<ClInclude Include="gdb_protocol.h" />
<ClInclude Include="host.h" />
<ClInclude Include="host_settings.h" />
@ -132,5 +123,6 @@
<ClInclude Include="fullscreen_ui.h" />
<ClInclude Include="common_host.h" />
<ClInclude Include="achievements_private.h" />
<ClInclude Include="shader_cache_version.h" />
</ItemGroup>
</Project>

View File

@ -18,10 +18,10 @@
#include "resources.h"
#include "settings.h"
#include "system.h"
#include "util/host_display.h"
#include "scmversion/scmversion.h"
#include "util/gpu_device.h"
#include "util/imgui_fullscreen.h"
#include "util/imgui_manager.h"
#include "util/ini_settings_interface.h"
@ -404,7 +404,7 @@ static std::unique_ptr<GameList::Entry> s_game_settings_entry;
static std::vector<std::pair<std::string, bool>> s_game_list_directories_cache;
static std::vector<std::string> s_graphics_adapter_list_cache;
static std::vector<std::string> s_fullscreen_mode_list_cache;
static FrontendCommon::PostProcessingChain s_postprocessing_chain;
static PostProcessingChain s_postprocessing_chain;
static std::vector<const HotkeyInfo*> s_hotkey_list_cache;
static std::atomic_bool s_settings_changed{false};
static std::atomic_bool s_game_settings_changed{false};
@ -2394,7 +2394,7 @@ void FullscreenUI::SwitchToGameSettings(const GameList::Entry* entry)
void FullscreenUI::PopulateGraphicsAdapterList()
{
HostDisplay::AdapterAndModeList ml(g_host_display->GetAdapterAndModeList());
GPUDevice::AdapterAndModeList ml(g_gpu_device->GetAdapterAndModeList());
s_graphics_adapter_list_cache = std::move(ml.adapter_names);
s_fullscreen_mode_list_cache = std::move(ml.fullscreen_modes);
s_fullscreen_mode_list_cache.insert(s_fullscreen_mode_list_cache.begin(), FSUI_STR("Borderless Fullscreen"));
@ -3653,7 +3653,7 @@ void FullscreenUI::DrawDisplaySettingsPage()
adapter.has_value() ? (adapter->empty() ? FSUI_CSTR("Default") : adapter->c_str()) :
FSUI_CSTR("Use Global Setting")))
{
HostDisplay::AdapterAndModeList aml(g_host_display->GetAdapterAndModeList());
GPUDevice::AdapterAndModeList aml(g_gpu_device->GetAdapterAndModeList());
ImGuiFullscreen::ChoiceDialogOptions options;
options.reserve(aml.adapter_names.size() + 2);
@ -3698,7 +3698,7 @@ void FullscreenUI::DrawDisplaySettingsPage()
fsmode.has_value() ? (fsmode->empty() ? FSUI_CSTR("Borderless Fullscreen") : fsmode->c_str()) :
FSUI_CSTR("Use Global Setting")))
{
HostDisplay::AdapterAndModeList aml(g_host_display->GetAdapterAndModeList());
GPUDevice::AdapterAndModeList aml(g_gpu_device->GetAdapterAndModeList());
ImGuiFullscreen::ChoiceDialogOptions options;
options.reserve(aml.fullscreen_modes.size() + 2);
@ -3939,7 +3939,7 @@ void FullscreenUI::SavePostProcessingChain()
const std::string config(s_postprocessing_chain.GetConfigString());
bsi->SetStringValue("Display", "PostProcessChain", config.c_str());
if (bsi->GetBoolValue("Display", "PostProcessing", false))
g_host_display->SetPostProcessingChain(config);
g_gpu_device->SetPostProcessingChain(config);
if (IsEditingGameSettings(bsi))
{
s_game_settings_interface->Save();
@ -3975,7 +3975,7 @@ void FullscreenUI::DrawPostProcessingSettingsPage()
bsi->GetBoolValue("Display", "PostProcessing", false)))
{
const std::string chain(bsi->GetStringValue("Display", "PostProcessChain", ""));
g_host_display->SetPostProcessingChain(chain);
g_gpu_device->SetPostProcessingChain(chain);
if (chain.empty())
ShowToast(std::string(), FSUI_STR("Post-processing chain is empty."));
else
@ -3987,7 +3987,7 @@ void FullscreenUI::DrawPostProcessingSettingsPage()
if (MenuButton(FSUI_ICONSTR(ICON_FA_PLUS, "Add Shader"), FSUI_CSTR("Adds a new shader to the chain.")))
{
ImGuiFullscreen::ChoiceDialogOptions options;
for (std::string& name : FrontendCommon::PostProcessingChain::GetAvailableShaderNames())
for (std::string& name : PostProcessingChain::GetAvailableShaderNames())
options.emplace_back(std::move(name), false);
OpenChoiceDialog(FSUI_ICONSTR(ICON_FA_PLUS, "Add Shader"), false, std::move(options),
@ -4034,8 +4034,8 @@ void FullscreenUI::DrawPostProcessingSettingsPage()
for (u32 stage_index = 0; stage_index < s_postprocessing_chain.GetStageCount(); stage_index++)
{
ImGui::PushID(stage_index);
FrontendCommon::PostProcessingShader& stage = s_postprocessing_chain.GetShaderStage(stage_index);
str.Fmt(FSUI_FSTR("Stage {}: {}"), stage_index + 1, stage.GetName());
PostProcessingShader* stage = s_postprocessing_chain.GetShaderStage(stage_index);
str.Fmt(FSUI_FSTR("Stage {}: {}"), stage_index + 1, stage->GetName());
MenuHeading(str);
if (MenuButton(FSUI_ICONSTR(ICON_FA_TIMES, "Remove From Chain"), FSUI_CSTR("Removes this shader from the chain.")))
@ -4059,11 +4059,11 @@ void FullscreenUI::DrawPostProcessingSettingsPage()
postprocessing_action_index = stage_index;
}
for (FrontendCommon::PostProcessingShader::Option& opt : stage.GetOptions())
for (PostProcessingShader::Option& opt : stage->GetOptions())
{
switch (opt.type)
{
case FrontendCommon::PostProcessingShader::Option::Type::Bool:
case PostProcessingShader::Option::Type::Bool:
{
bool value = (opt.value[0].int_value != 0);
tstr.Fmt(ICON_FA_COGS "{}", opt.ui_name);
@ -4078,7 +4078,7 @@ void FullscreenUI::DrawPostProcessingSettingsPage()
}
break;
case FrontendCommon::PostProcessingShader::Option::Type::Float:
case PostProcessingShader::Option::Type::Float:
{
tstr.Fmt(ICON_FA_RULER_VERTICAL "{}##{}", opt.ui_name, opt.name);
str.Fmt(FSUI_FSTR("Value: {} | Default: {} | Minimum: {} | Maximum: {}"), opt.value[0].float_value,
@ -4181,7 +4181,7 @@ void FullscreenUI::DrawPostProcessingSettingsPage()
}
break;
case FrontendCommon::PostProcessingShader::Option::Type::Int:
case PostProcessingShader::Option::Type::Int:
{
tstr.Fmt(ICON_FA_RULER_VERTICAL "{}##{}", opt.ui_name, opt.name);
str.Fmt(FSUI_FSTR("Value: {} | Default: {} | Minimum: {} | Maximum: {}"), opt.value[0].int_value,
@ -4293,9 +4293,9 @@ void FullscreenUI::DrawPostProcessingSettingsPage()
{
case POSTPROCESSING_ACTION_REMOVE:
{
FrontendCommon::PostProcessingShader& stage = s_postprocessing_chain.GetShaderStage(postprocessing_action_index);
PostProcessingShader* stage = s_postprocessing_chain.GetShaderStage(postprocessing_action_index);
ShowToast(std::string(),
fmt::format(FSUI_FSTR("Removed stage {} ({})."), postprocessing_action_index + 1, stage.GetName()));
fmt::format(FSUI_FSTR("Removed stage {} ({})."), postprocessing_action_index + 1, stage->GetName()));
s_postprocessing_chain.RemoveStage(postprocessing_action_index);
SavePostProcessingChain();
}
@ -4598,7 +4598,9 @@ void FullscreenUI::DrawAchievementsSettingsPage()
EndMenuButtons();
}
void FullscreenUI::DrawAchievementsLoginWindow() {}
void FullscreenUI::DrawAchievementsLoginWindow()
{
}
#endif
@ -5016,15 +5018,16 @@ void FullscreenUI::PopulateSaveStateScreenshot(SaveStateListEntry* li, const Ext
li->preview_texture.reset();
if (ssi && !ssi->screenshot_data.empty())
{
li->preview_texture =
g_host_display->CreateTexture(ssi->screenshot_width, ssi->screenshot_height, 1, 1, 1, GPUTexture::Format::RGBA8,
li->preview_texture = g_gpu_device->CreateTexture(
ssi->screenshot_width, ssi->screenshot_height, 1, 1, 1, GPUTexture::Type::Texture, GPUTexture::Format::RGBA8,
ssi->screenshot_data.data(), sizeof(u32) * ssi->screenshot_width, false);
}
else
{
li->preview_texture = g_host_display->CreateTexture(
Resources::PLACEHOLDER_ICON_WIDTH, Resources::PLACEHOLDER_ICON_HEIGHT, 1, 1, 1, GPUTexture::Format::RGBA8,
Resources::PLACEHOLDER_ICON_DATA, sizeof(u32) * Resources::PLACEHOLDER_ICON_WIDTH, false);
li->preview_texture = g_gpu_device->CreateTexture(
Resources::PLACEHOLDER_ICON_WIDTH, Resources::PLACEHOLDER_ICON_HEIGHT, 1, 1, 1, GPUTexture::Type::Texture,
GPUTexture::Format::RGBA8, Resources::PLACEHOLDER_ICON_DATA, sizeof(u32) * Resources::PLACEHOLDER_ICON_WIDTH,
false);
}
if (!li->preview_texture)

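Note (not part of the commit): the preview-texture hunks above show the shape of the new GPUDevice::CreateTexture() call, which replaces HostDisplay::CreateTexture() and gains an explicit GPUTexture::Type argument. A minimal C++ sketch of the call as it appears at these sites; the parameter names in the comment are inferred from the arguments, and the meaning of the trailing bool is an assumption:

// Sketch inferred from the call sites above (not compilable outside the codebase):
// CreateTexture(width, height, layers, levels, samples, type, format, data, data_stride, dynamic?)
static std::unique_ptr<GPUTexture> UploadRGBA8(const void* pixels, u32 width, u32 height)
{
  return g_gpu_device->CreateTexture(width, height, 1, 1, 1, GPUTexture::Type::Texture,
                                     GPUTexture::Format::RGBA8, pixels, sizeof(u32) * width, false);
}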

@ -8,13 +8,13 @@
#include "common/string_util.h"
#include "dma.h"
#include "host.h"
#include "util/host_display.h"
#include "imgui.h"
#include "interrupt_controller.h"
#include "settings.h"
#include "stb_image_write.h"
#include "system.h"
#include "timers.h"
#include "util/gpu_device.h"
#include "util/state_wrapper.h"
#include <cmath>
Log_SetChannel(GPU);
@ -27,8 +27,8 @@ GPU::GPU() = default;
GPU::~GPU()
{
if (g_host_display)
g_host_display->SetGPUTimingEnabled(false);
if (g_gpu_device)
g_gpu_device->SetGPUTimingEnabled(false);
}
bool GPU::Initialize()
@ -49,12 +49,12 @@ bool GPU::Initialize()
UpdateCRTCConfig();
if (g_settings.display_post_processing && !g_settings.display_post_process_chain.empty() &&
!g_host_display->SetPostProcessingChain(g_settings.display_post_process_chain))
!g_gpu_device->SetPostProcessingChain(g_settings.display_post_process_chain))
{
Host::AddOSDMessage(TRANSLATE_STR("OSDMessage", "Failed to load post processing shader chain."), 20.0f);
}
g_host_display->SetGPUTimingEnabled(g_settings.display_show_gpu);
g_gpu_device->SetGPUTimingEnabled(g_settings.display_show_gpu);
return true;
}
@ -75,13 +75,7 @@ void GPU::UpdateSettings()
// Crop mode calls this, so recalculate the display area
UpdateCRTCDisplayParameters();
g_host_display->SetGPUTimingEnabled(g_settings.display_show_gpu);
}
bool GPU::IsHardwareRenderer()
{
const GPURenderer renderer = GetRendererType();
return (renderer != GPURenderer::Software);
g_gpu_device->SetGPUTimingEnabled(g_settings.display_show_gpu);
}
void GPU::CPUClockChanged()
@ -89,7 +83,9 @@ void GPU::CPUClockChanged()
UpdateCRTCConfig();
}
void GPU::UpdateResolutionScale() {}
void GPU::UpdateResolutionScale()
{
}
std::tuple<u32, u32> GPU::GetEffectiveDisplayResolution(bool scaled /* = true */)
{
@ -168,6 +164,8 @@ void GPU::SoftReset()
bool GPU::DoState(StateWrapper& sw, GPUTexture** host_texture, bool update_display)
{
FlushRender();
if (sw.IsReading())
{
// perform a reset to discard all pending draws/fb state
@ -293,9 +291,9 @@ bool GPU::DoState(StateWrapper& sw, GPUTexture** host_texture, bool update_displ
return !sw.HasError();
}
void GPU::ResetGraphicsAPIState() {}
void GPU::RestoreGraphicsAPIState() {}
void GPU::RestoreGraphicsAPIState()
{
}
void GPU::UpdateDMARequest()
{
@ -980,8 +978,8 @@ void GPU::UpdateCommandTickEvent()
bool GPU::ConvertScreenCoordinatesToBeamTicksAndLines(s32 window_x, s32 window_y, float x_scale, u32* out_tick,
u32* out_line) const
{
auto [display_x, display_y] = g_host_display->ConvertWindowCoordinatesToDisplayCoordinates(
window_x, window_y, g_host_display->GetWindowWidth(), g_host_display->GetWindowHeight());
auto [display_x, display_y] = g_gpu_device->ConvertWindowCoordinatesToDisplayCoordinates(
window_x, window_y, g_gpu_device->GetWindowWidth(), g_gpu_device->GetWindowHeight());
if (x_scale != 1.0f)
{
@ -1284,11 +1282,17 @@ void GPU::HandleGetGPUInfoCommand(u32 value)
}
}
void GPU::ClearDisplay() {}
void GPU::ClearDisplay()
{
}
void GPU::UpdateDisplay() {}
void GPU::UpdateDisplay()
{
}
void GPU::ReadVRAM(u32 x, u32 y, u32 width, u32 height) {}
void GPU::ReadVRAM(u32 x, u32 y, u32 width, u32 height)
{
}
void GPU::FillVRAM(u32 x, u32 y, u32 width, u32 height, u32 color)
{
@ -1446,9 +1450,13 @@ void GPU::CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 he
}
}
void GPU::DispatchRenderCommand() {}
void GPU::DispatchRenderCommand()
{
}
void GPU::FlushRender() {}
void GPU::FlushRender()
{
}
void GPU::SetDrawMode(u16 value)
{
@ -1687,4 +1695,6 @@ void GPU::DrawDebugStateWindow()
ImGui::End();
}
void GPU::DrawRendererStats(bool is_idle_frame) {}
void GPU::DrawRendererStats(bool is_idle_frame)
{
}


@ -17,13 +17,12 @@
class StateWrapper;
class HostDisplay;
class GPUDevice;
class GPUTexture;
class TimingEvent;
namespace Threading
{
namespace Threading {
class Thread;
}
@ -80,21 +79,20 @@ public:
GPU();
virtual ~GPU();
virtual GPURenderer GetRendererType() const = 0;
virtual const Threading::Thread* GetSWThread() const = 0;
virtual bool IsHardwareRenderer() const = 0;
virtual bool Initialize();
virtual void Reset(bool clear_vram);
virtual bool DoState(StateWrapper& sw, GPUTexture** save_to_texture, bool update_display);
// Graphics API state reset/restore - call when drawing the UI etc.
virtual void ResetGraphicsAPIState();
// TODO: replace with "invalidate cached state"
virtual void RestoreGraphicsAPIState();
// Render statistics debug window.
void DrawDebugStateWindow();
bool IsHardwareRenderer();
void CPUClockChanged();
// MMIO access
@ -161,25 +159,7 @@ public:
float ComputeVerticalFrequency() const;
float GetDisplayAspectRatio() const;
#ifdef _WIN32
// gpu_hw_d3d11.cpp
static std::unique_ptr<GPU> CreateHardwareD3D11Renderer();
// gpu_hw_d3d12.cpp
static std::unique_ptr<GPU> CreateHardwareD3D12Renderer();
#endif
#ifdef WITH_OPENGL
// gpu_hw_opengl.cpp
static std::unique_ptr<GPU> CreateHardwareOpenGLRenderer();
#endif
#ifdef WITH_VULKAN
// gpu_hw_vulkan.cpp
static std::unique_ptr<GPU> CreateHardwareVulkanRenderer();
#endif
// gpu_sw.cpp
static std::unique_ptr<GPU> CreateHardwareRenderer();
static std::unique_ptr<GPU> CreateSoftwareRenderer();
// Converts window coordinates into horizontal ticks and scanlines. Returns false if out of range. Used for lightguns.
@ -192,6 +172,9 @@ public:
// Dumps raw VRAM to a file.
bool DumpVRAMToFile(const char* filename);
// Ensures all buffered vertices are drawn.
virtual void FlushRender();
protected:
TickCount CRTCTicksToSystemTicks(TickCount crtc_ticks, TickCount fractional_ticks) const;
TickCount SystemTicksToCRTCTicks(TickCount sysclk_ticks, TickCount* fractional_ticks) const;
@ -291,7 +274,6 @@ protected:
virtual void UpdateVRAM(u32 x, u32 y, u32 width, u32 height, const void* data, bool set_mask, bool check_mask);
virtual void CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height);
virtual void DispatchRenderCommand();
virtual void FlushRender();
virtual void ClearDisplay();
virtual void UpdateDisplay();
virtual void DrawRendererStats(bool is_idle_frame);

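Note (not part of the commit): with the per-API factory declarations removed above, gpu.h is left with just CreateHardwareRenderer() and CreateSoftwareRenderer(), so backend selection no longer needs #ifdef blocks at the call site. A hypothetical selection helper; only the two GPU::Create*Renderer() factories are taken from this diff, everything else is illustrative:

// Hypothetical: pick a renderer after the refactor.
std::unique_ptr<GPU> CreateGPUForRenderer(GPURenderer renderer)
{
  return (renderer == GPURenderer::Software) ? GPU::CreateSoftwareRenderer() :
                                               GPU::CreateHardwareRenderer();
}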
File diff suppressed because it is too large.


@ -2,9 +2,15 @@
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "common/heap_array.h"
#include "gpu.h"
#include "util/host_display.h"
#include "texture_replacements.h"
#include "util/gpu_device.h"
#include "common/dimensional_array.h"
#include "common/heap_array.h"
#include <sstream>
#include <string>
#include <tuple>
@ -15,7 +21,7 @@ class GPU_SW_Backend;
struct GPUBackendCommand;
struct GPUBackendDrawCommand;
class GPU_HW : public GPU
class GPU_HW final : public GPU
{
public:
enum class BatchRenderMode : u8
@ -34,24 +40,26 @@ public:
};
GPU_HW();
virtual ~GPU_HW();
~GPU_HW() override;
const Threading::Thread* GetSWThread() const override;
bool IsHardwareRenderer() const override;
virtual bool Initialize() override;
virtual void Reset(bool clear_vram) override;
virtual bool DoState(StateWrapper& sw, GPUTexture** host_texture, bool update_display) override;
bool Initialize() override;
void Reset(bool clear_vram) override;
bool DoState(StateWrapper& sw, GPUTexture** host_texture, bool update_display) override;
void RestoreGraphicsAPIState() override;
void UpdateSettings() override;
void UpdateResolutionScale() override final;
std::tuple<u32, u32> GetEffectiveDisplayResolution(bool scaled = true) override final;
std::tuple<u32, u32> GetFullDisplayResolution(bool scaled = true) override final;
protected:
private:
enum : u32
{
VRAM_UPDATE_TEXTURE_BUFFER_SIZE = 4 * 1024 * 1024,
VERTEX_BUFFER_SIZE = 4 * 1024 * 1024,
UNIFORM_BUFFER_SIZE = 2 * 1024 * 1024,
MAX_BATCH_VERTEX_COUNTER_IDS = 65536 - 2,
MAX_VERTICES_FOR_RECTANGLE = 6 * (((MAX_PRIMITIVE_WIDTH + (TEXTURE_PAGE_WIDTH - 1)) / TEXTURE_PAGE_WIDTH) + 1u) *
(((MAX_PRIMITIVE_HEIGHT + (TEXTURE_PAGE_HEIGHT - 1)) / TEXTURE_PAGE_HEIGHT) + 1u)
@ -129,43 +137,6 @@ protected:
u32 u_set_mask_while_drawing;
};
struct VRAMFillUBOData
{
u32 u_dst_x;
u32 u_dst_y;
u32 u_end_x;
u32 u_end_y;
float u_fill_color[4];
u32 u_interlaced_displayed_field;
};
struct VRAMWriteUBOData
{
u32 u_dst_x;
u32 u_dst_y;
u32 u_end_x;
u32 u_end_y;
u32 u_width;
u32 u_height;
u32 u_buffer_base_offset;
u32 u_mask_or_bits;
float u_depth_value;
};
struct VRAMCopyUBOData
{
u32 u_src_x;
u32 u_src_y;
u32 u_dst_x;
u32 u_dst_y;
u32 u_end_x;
u32 u_end_y;
u32 u_width;
u32 u_height;
u32 u_set_mask_bit;
float u_depth_value;
};
struct RendererStats
{
u32 num_batches;
@ -173,63 +144,42 @@ protected:
u32 num_uniform_buffer_updates;
};
class ShaderCompileProgressTracker
{
public:
ShaderCompileProgressTracker(std::string title, u32 total);
bool CreateBuffers();
void ClearFramebuffer();
void DestroyBuffers();
void Increment();
bool CompilePipelines();
void DestroyPipelines();
private:
std::string m_title;
u64 m_min_time;
u64 m_update_interval;
u64 m_start_time;
u64 m_last_update_time;
u32 m_progress;
u32 m_total;
};
static constexpr std::tuple<float, float, float, float> RGBA8ToFloat(u32 rgba)
{
return std::make_tuple(static_cast<float>(rgba & UINT32_C(0xFF)) * (1.0f / 255.0f),
static_cast<float>((rgba >> 8) & UINT32_C(0xFF)) * (1.0f / 255.0f),
static_cast<float>((rgba >> 16) & UINT32_C(0xFF)) * (1.0f / 255.0f),
static_cast<float>(rgba >> 24) * (1.0f / 255.0f));
}
void UpdateHWSettings(bool* framebuffer_changed, bool* shaders_changed);
virtual void UpdateVRAMReadTexture();
virtual void UpdateDepthBufferFromMaskBit() = 0;
virtual void ClearDepthBuffer() = 0;
virtual void SetScissorFromDrawingArea() = 0;
virtual void MapBatchVertexPointer(u32 required_vertices) = 0;
virtual void UnmapBatchVertexPointer(u32 used_vertices) = 0;
virtual void UploadUniformBuffer(const void* uniforms, u32 uniforms_size) = 0;
virtual void DrawBatchVertices(BatchRenderMode render_mode, u32 base_vertex, u32 num_vertices) = 0;
void UpdateVRAMReadTexture();
void UpdateDepthBufferFromMaskBit();
void ClearDepthBuffer();
void SetScissor();
void MapBatchVertexPointer(u32 required_vertices);
void UnmapBatchVertexPointer(u32 used_vertices);
void DrawBatchVertices(BatchRenderMode render_mode, u32 base_vertex, u32 num_vertices);
void ClearDisplay() override;
void UpdateDisplay() override;
u32 CalculateResolutionScale() const;
GPUDownsampleMode GetDownsampleMode(u32 resolution_scale) const;
ALWAYS_INLINE bool IsUsingMultisampling() const { return m_multisamples > 1; }
ALWAYS_INLINE bool IsUsingDownsampling() const
{
return (m_downsample_mode != GPUDownsampleMode::Disabled && !m_GPUSTAT.display_area_color_depth_24);
}
bool IsUsingMultisampling() const;
bool IsUsingDownsampling() const;
void SetFullVRAMDirtyRectangle()
{
m_vram_dirty_rect.Set(0, 0, VRAM_WIDTH, VRAM_HEIGHT);
m_draw_mode.SetTexturePageChanged();
}
void ClearVRAMDirtyRectangle() { m_vram_dirty_rect.SetInvalid(); }
void SetFullVRAMDirtyRectangle();
void ClearVRAMDirtyRectangle();
void IncludeVRAMDirtyRectangle(const Common::Rectangle<u32>& rect);
bool IsFlushed() const { return m_batch_current_vertex_ptr == m_batch_start_vertex_ptr; }
u32 GetBatchVertexSpace() const { return static_cast<u32>(m_batch_end_vertex_ptr - m_batch_current_vertex_ptr); }
u32 GetBatchVertexCount() const { return static_cast<u32>(m_batch_current_vertex_ptr - m_batch_start_vertex_ptr); }
ALWAYS_INLINE bool IsFlushed() const { return m_batch_current_vertex_ptr == m_batch_start_vertex_ptr; }
ALWAYS_INLINE u32 GetBatchVertexSpace() const
{
return static_cast<u32>(m_batch_end_vertex_ptr - m_batch_current_vertex_ptr);
}
ALWAYS_INLINE u32 GetBatchVertexCount() const
{
return static_cast<u32>(m_batch_current_vertex_ptr - m_batch_start_vertex_ptr);
}
void EnsureVertexBufferSpace(u32 required_vertices);
void EnsureVertexBufferSpaceForCurrentCommand();
void ResetBatchVertexDepth();
@ -241,91 +191,31 @@ protected:
}
/// Returns the interlaced mode to use when scanning out/displaying.
ALWAYS_INLINE InterlacedRenderMode GetInterlacedRenderMode() const
{
if (IsInterlacedDisplayEnabled())
{
return m_GPUSTAT.vertical_resolution ? InterlacedRenderMode::InterleavedFields :
InterlacedRenderMode::SeparateFields;
}
else
{
return InterlacedRenderMode::None;
}
}
/// Returns true if the specified texture filtering mode requires dual-source blending.
ALWAYS_INLINE bool TextureFilterRequiresDualSourceBlend(GPUTextureFilter filter)
{
return (filter == GPUTextureFilter::Bilinear || filter == GPUTextureFilter::JINC2 ||
filter == GPUTextureFilter::xBR);
}
/// Returns true if alpha blending should be enabled for drawing the current batch.
ALWAYS_INLINE bool UseAlphaBlending(GPUTransparencyMode transparency_mode, BatchRenderMode render_mode) const
{
if (m_texture_filtering == GPUTextureFilter::Bilinear || m_texture_filtering == GPUTextureFilter::JINC2 ||
m_texture_filtering == GPUTextureFilter::xBR)
{
return true;
}
if (transparency_mode == GPUTransparencyMode::Disabled || render_mode == BatchRenderMode::OnlyOpaque)
return false;
return true;
}
InterlacedRenderMode GetInterlacedRenderMode() const;
/// We need two-pass rendering when using BG-FG blending and texturing, as the transparency can be enabled
/// on a per-pixel basis, and the opaque pixels shouldn't be blended at all.
ALWAYS_INLINE bool NeedsTwoPassRendering() const
{
// TODO: see if there's a better way we can do this. definitely can with fbfetch.
return (m_batch.texture_mode != GPUTextureMode::Disabled &&
(m_batch.transparency_mode == GPUTransparencyMode::BackgroundMinusForeground ||
(!m_supports_dual_source_blend && m_batch.transparency_mode != GPUTransparencyMode::Disabled)));
}
/// Returns true if the specified VRAM fill is oversized.
ALWAYS_INLINE static bool IsVRAMFillOversized(u32 x, u32 y, u32 width, u32 height)
{
return ((x + width) > VRAM_WIDTH || (y + height) > VRAM_HEIGHT);
}
ALWAYS_INLINE bool IsUsingSoftwareRendererForReadbacks() { return static_cast<bool>(m_sw_renderer); }
void FillBackendCommandParameters(GPUBackendCommand* cmd) const;
void FillDrawCommand(GPUBackendDrawCommand* cmd, GPURenderCommand rc) const;
void UpdateSoftwareRenderer(bool copy_vram_from_hw);
void ReadSoftwareRendererVRAM(u32 x, u32 y, u32 width, u32 height);
void UpdateSoftwareRendererVRAM(u32 x, u32 y, u32 width, u32 height, const void* data, bool set_mask,
bool check_mask);
void FillSoftwareRendererVRAM(u32 x, u32 y, u32 width, u32 height, u32 color);
void CopySoftwareRendererVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height);
void FillVRAM(u32 x, u32 y, u32 width, u32 height, u32 color) override;
void ReadVRAM(u32 x, u32 y, u32 width, u32 height) override;
void UpdateVRAM(u32 x, u32 y, u32 width, u32 height, const void* data, bool set_mask, bool check_mask) override;
void CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) override;
void DispatchRenderCommand() override;
void FlushRender() override;
void DrawRendererStats(bool is_idle_frame) override;
void CalcScissorRect(int* left, int* top, int* right, int* bottom);
std::tuple<s32, s32> ScaleVRAMCoordinates(s32 x, s32 y) const
{
return std::make_tuple(x * s32(m_resolution_scale), y * s32(m_resolution_scale));
}
/// Computes the area affected by a VRAM transfer, including wrap-around of X.
Common::Rectangle<u32> GetVRAMTransferBounds(u32 x, u32 y, u32 width, u32 height) const;
/// Returns true if the VRAM copy shader should be used (oversized copies, masking).
bool UseVRAMCopyShader(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) const;
VRAMFillUBOData GetVRAMFillUBOData(u32 x, u32 y, u32 width, u32 height, u32 color) const;
VRAMWriteUBOData GetVRAMWriteUBOData(u32 x, u32 y, u32 width, u32 height, u32 buffer_offset, bool set_mask,
bool check_mask) const;
VRAMCopyUBOData GetVRAMCopyUBOData(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) const;
bool BlitVRAMReplacementTexture(const TextureReplacementTexture* tex, u32 dst_x, u32 dst_y, u32 width, u32 height);
/// Expands a line into two triangles.
void DrawLine(float x0, float y0, u32 col0, float x1, float y1, u32 col1, float depth);
@ -340,22 +230,31 @@ protected:
void SetBatchDepthBuffer(bool enabled);
void CheckForDepthClear(const BatchVertex* vertices, u32 num_vertices);
/// UBO data for adaptive smoothing.
struct SmoothingUBOData
{
float min_uv[2];
float max_uv[2];
float rcp_size[2];
};
/// Returns the number of mipmap levels used for adaptive smoothing.
u32 GetAdaptiveDownsamplingMipLevels() const;
/// Returns the UBO data for an adaptive smoothing pass.
SmoothingUBOData GetSmoothingUBO(u32 level, u32 left, u32 top, u32 width, u32 height, u32 tex_width,
u32 tex_height) const;
void DownsampleFramebuffer(GPUTexture* source, u32 left, u32 top, u32 width, u32 height);
void DownsampleFramebufferAdaptive(GPUTexture* source, u32 left, u32 top, u32 width, u32 height);
void DownsampleFramebufferBoxFilter(GPUTexture* source, u32 left, u32 top, u32 width, u32 height);
std::unique_ptr<GPUTexture> m_vram_texture;
std::unique_ptr<GPUTexture> m_vram_depth_texture;
std::unique_ptr<GPUTexture> m_vram_depth_view;
std::unique_ptr<GPUTexture> m_vram_read_texture;
std::unique_ptr<GPUTexture> m_vram_readback_texture;
std::unique_ptr<GPUTexture> m_vram_replacement_texture;
std::unique_ptr<GPUTexture> m_display_texture;
std::unique_ptr<GPUFramebuffer> m_vram_framebuffer;
std::unique_ptr<GPUFramebuffer> m_vram_update_depth_framebuffer;
std::unique_ptr<GPUFramebuffer> m_vram_readback_framebuffer;
std::unique_ptr<GPUFramebuffer> m_display_framebuffer;
std::unique_ptr<GPUTextureBuffer> m_vram_upload_buffer;
std::unique_ptr<GPUTexture> m_vram_write_texture;
FixedHeapArray<u16, VRAM_WIDTH * VRAM_HEIGHT> m_vram_shadow;
std::unique_ptr<GPU_SW_Backend> m_sw_renderer;
BatchVertex* m_batch_start_vertex_ptr = nullptr;
@ -368,20 +267,17 @@ protected:
u32 m_resolution_scale = 1;
u32 m_multisamples = 1;
u32 m_max_resolution_scale = 1;
u32 m_max_multisamples = 1;
RenderAPI m_render_api = RenderAPI::None;
bool m_true_color = true;
union
{
BitField<u8, bool, 0, 1> m_supports_per_sample_shading;
BitField<u8, bool, 1, 1> m_supports_dual_source_blend;
BitField<u8, bool, 2, 1> m_supports_adaptive_downsampling;
BitField<u8, bool, 3, 1> m_supports_disable_color_perspective;
BitField<u8, bool, 4, 1> m_per_sample_shading;
BitField<u8, bool, 5, 1> m_scaled_dithering;
BitField<u8, bool, 6, 1> m_chroma_smoothing;
BitField<u8, bool, 7, 1> m_disable_color_perspective;
BitField<u8, bool, 2, 1> m_supports_disable_color_perspective;
BitField<u8, bool, 3, 1> m_per_sample_shading;
BitField<u8, bool, 4, 1> m_scaled_dithering;
BitField<u8, bool, 5, 1> m_chroma_smoothing;
BitField<u8, bool, 6, 1> m_disable_color_perspective;
u8 bits = 0;
};
@ -397,20 +293,45 @@ protected:
// Bounding box of VRAM area that the GPU has drawn into.
Common::Rectangle<u32> m_vram_dirty_rect;
// Changed state
bool m_batch_ubo_dirty = true;
// [depth_test][render_mode][texture_mode][transparency_mode][dithering][interlacing]
DimensionalArray<std::unique_ptr<GPUPipeline>, 2, 2, 5, 9, 4, 3> m_batch_pipelines{};
// [wrapped][interlaced]
DimensionalArray<std::unique_ptr<GPUPipeline>, 2, 2> m_vram_fill_pipelines{};
// [depth_test]
std::array<std::unique_ptr<GPUPipeline>, 2> m_vram_write_pipelines{};
std::array<std::unique_ptr<GPUPipeline>, 2> m_vram_copy_pipelines{};
std::unique_ptr<GPUPipeline> m_vram_readback_pipeline;
std::unique_ptr<GPUPipeline> m_vram_update_depth_pipeline;
// [depth_24][interlace_mode]
DimensionalArray<std::unique_ptr<GPUPipeline>, 3, 2> m_display_pipelines{};
// TODO: get rid of this, and use image blits instead where supported
std::unique_ptr<GPUPipeline> m_copy_pipeline;
std::unique_ptr<GPUTexture> m_downsample_texture;
std::unique_ptr<GPUTexture> m_downsample_render_texture;
std::unique_ptr<GPUFramebuffer> m_downsample_framebuffer;
std::unique_ptr<GPUTexture> m_downsample_weight_texture;
std::unique_ptr<GPUFramebuffer> m_downsample_weight_framebuffer;
std::unique_ptr<GPUPipeline> m_downsample_first_pass_pipeline;
std::unique_ptr<GPUPipeline> m_downsample_mid_pass_pipeline;
std::unique_ptr<GPUPipeline> m_downsample_blur_pass_pipeline;
std::unique_ptr<GPUPipeline> m_downsample_composite_pass_pipeline;
std::unique_ptr<GPUSampler> m_downsample_lod_sampler;
std::unique_ptr<GPUSampler> m_downsample_composite_sampler;
// Statistics
RendererStats m_renderer_stats = {};
RendererStats m_last_renderer_stats = {};
// Changed state
bool m_batch_ubo_dirty = true;
private:
enum : u32
{
MIN_BATCH_VERTEX_COUNT = 6,
MAX_BATCH_VERTEX_COUNT = VERTEX_BUFFER_SIZE / sizeof(BatchVertex)
};
void LoadVertices();
ALWAYS_INLINE void AddVertex(const BatchVertex& v)

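Note (not part of the commit): RGBA8ToFloat() above remains a constexpr helper on GPU_HW. A small illustrative use with a made-up colour value, unpacking the packed byte order shown in its body:

// Illustrative only: lowest byte is red, highest byte is alpha.
const auto [r, g, b, a] = RGBA8ToFloat(0x80FF4020u);
// r = 0x20/255, g = 0x40/255, b = 0xFF/255, a = 0x80/255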
File diff suppressed because it is too large.


@ -1,143 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "common/d3d11/shader_cache.h"
#include "common/d3d11/stream_buffer.h"
#include "common/d3d11/texture.h"
#include "gpu_hw.h"
#include "texture_replacements.h"
#include <array>
#include <d3d11.h>
#include <memory>
#include <tuple>
#include <wrl/client.h>
class GPU_HW_D3D11 final : public GPU_HW
{
public:
template<typename T>
using ComPtr = Microsoft::WRL::ComPtr<T>;
GPU_HW_D3D11(ID3D11Device* device, ID3D11DeviceContext* context);
~GPU_HW_D3D11() override;
GPURenderer GetRendererType() const override;
bool Initialize() override;
void Reset(bool clear_vram) override;
bool DoState(StateWrapper& sw, GPUTexture** host_texture, bool update_display) override;
void ResetGraphicsAPIState() override;
void RestoreGraphicsAPIState() override;
void UpdateSettings() override;
protected:
void ClearDisplay() override;
void UpdateDisplay() override;
void ReadVRAM(u32 x, u32 y, u32 width, u32 height) override;
void FillVRAM(u32 x, u32 y, u32 width, u32 height, u32 color) override;
void UpdateVRAM(u32 x, u32 y, u32 width, u32 height, const void* data, bool set_mask, bool check_mask) override;
void CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) override;
void UpdateVRAMReadTexture() override;
void UpdateDepthBufferFromMaskBit() override;
void ClearDepthBuffer() override;
void SetScissorFromDrawingArea() override;
void MapBatchVertexPointer(u32 required_vertices) override;
void UnmapBatchVertexPointer(u32 used_vertices) override;
void UploadUniformBuffer(const void* data, u32 data_size) override;
void DrawBatchVertices(BatchRenderMode render_mode, u32 base_vertex, u32 num_vertices) override;
private:
enum : u32
{
// Currently we don't stream uniforms, instead just re-map the buffer every time and let the driver take care of it.
MAX_UNIFORM_BUFFER_SIZE = 64
};
void SetCapabilities();
bool CreateFramebuffer();
void ClearFramebuffer();
void DestroyFramebuffer();
bool CreateVertexBuffer();
bool CreateUniformBuffer();
bool CreateTextureBuffer();
bool CreateStateObjects();
void DestroyStateObjects();
bool CompileShaders();
void DestroyShaders();
void SetViewport(u32 x, u32 y, u32 width, u32 height);
void SetScissor(u32 x, u32 y, u32 width, u32 height);
void SetViewportAndScissor(u32 x, u32 y, u32 width, u32 height);
void DrawUtilityShader(ID3D11PixelShader* shader, const void* uniforms, u32 uniforms_size);
bool BlitVRAMReplacementTexture(const TextureReplacementTexture* tex, u32 dst_x, u32 dst_y, u32 width, u32 height);
void DownsampleFramebuffer(D3D11::Texture& source, u32 left, u32 top, u32 width, u32 height);
void DownsampleFramebufferAdaptive(D3D11::Texture& source, u32 left, u32 top, u32 width, u32 height);
void DownsampleFramebufferBoxFilter(D3D11::Texture& source, u32 left, u32 top, u32 width, u32 height);
ComPtr<ID3D11Device> m_device;
ComPtr<ID3D11DeviceContext> m_context;
// downsample texture - used for readbacks at >1xIR.
D3D11::Texture m_vram_texture;
D3D11::Texture m_vram_depth_texture;
ComPtr<ID3D11DepthStencilView> m_vram_depth_view;
D3D11::Texture m_vram_read_texture;
D3D11::Texture m_vram_encoding_texture;
D3D11::Texture m_display_texture;
D3D11::StreamBuffer m_vertex_stream_buffer;
D3D11::StreamBuffer m_uniform_stream_buffer;
D3D11::StreamBuffer m_texture_stream_buffer;
ComPtr<ID3D11ShaderResourceView> m_texture_stream_buffer_srv_r16ui;
ComPtr<ID3D11RasterizerState> m_cull_none_rasterizer_state;
ComPtr<ID3D11RasterizerState> m_cull_none_rasterizer_state_no_msaa;
ComPtr<ID3D11DepthStencilState> m_depth_disabled_state;
ComPtr<ID3D11DepthStencilState> m_depth_test_always_state;
ComPtr<ID3D11DepthStencilState> m_depth_test_less_state;
ComPtr<ID3D11DepthStencilState> m_depth_test_greater_state;
ComPtr<ID3D11BlendState> m_blend_disabled_state;
ComPtr<ID3D11BlendState> m_blend_no_color_writes_state;
ComPtr<ID3D11SamplerState> m_point_sampler_state;
ComPtr<ID3D11SamplerState> m_linear_sampler_state;
ComPtr<ID3D11SamplerState> m_trilinear_sampler_state;
std::array<ComPtr<ID3D11BlendState>, 5> m_batch_blend_states; // [transparency_mode]
ComPtr<ID3D11InputLayout> m_batch_input_layout;
std::array<ComPtr<ID3D11VertexShader>, 2> m_batch_vertex_shaders; // [textured]
std::array<std::array<std::array<std::array<ComPtr<ID3D11PixelShader>, 2>, 2>, 9>, 4>
m_batch_pixel_shaders; // [render_mode][texture_mode][dithering][interlacing]
ComPtr<ID3D11VertexShader> m_screen_quad_vertex_shader;
ComPtr<ID3D11VertexShader> m_uv_quad_vertex_shader;
ComPtr<ID3D11PixelShader> m_copy_pixel_shader;
std::array<std::array<ComPtr<ID3D11PixelShader>, 2>, 2> m_vram_fill_pixel_shaders; // [wrapped][interlaced]
ComPtr<ID3D11PixelShader> m_vram_read_pixel_shader;
ComPtr<ID3D11PixelShader> m_vram_write_pixel_shader;
ComPtr<ID3D11PixelShader> m_vram_copy_pixel_shader;
ComPtr<ID3D11PixelShader> m_vram_update_depth_pixel_shader;
std::array<std::array<ComPtr<ID3D11PixelShader>, 3>, 2> m_display_pixel_shaders; // [depth_24][interlaced]
D3D11::Texture m_vram_replacement_texture;
// downsampling
ComPtr<ID3D11PixelShader> m_downsample_first_pass_pixel_shader;
ComPtr<ID3D11PixelShader> m_downsample_mid_pass_pixel_shader;
ComPtr<ID3D11PixelShader> m_downsample_blur_pass_pixel_shader;
ComPtr<ID3D11PixelShader> m_downsample_composite_pixel_shader;
D3D11::Texture m_downsample_texture;
D3D11::Texture m_downsample_weight_texture;
std::vector<std::pair<ComPtr<ID3D11ShaderResourceView>, ComPtr<ID3D11RenderTargetView>>> m_downsample_mip_views;
};

File diff suppressed because it is too large.


@ -1,114 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "common/d3d12/staging_texture.h"
#include "common/d3d12/stream_buffer.h"
#include "common/d3d12/texture.h"
#include "common/dimensional_array.h"
#include "gpu_hw.h"
#include "texture_replacements.h"
#include <array>
#include <memory>
#include <tuple>
class GPU_HW_D3D12 final : public GPU_HW
{
public:
template<typename T>
using ComPtr = Microsoft::WRL::ComPtr<T>;
GPU_HW_D3D12();
~GPU_HW_D3D12() override;
GPURenderer GetRendererType() const override;
bool Initialize() override;
void Reset(bool clear_vram) override;
void ResetGraphicsAPIState() override;
void RestoreGraphicsAPIState() override;
void UpdateSettings() override;
protected:
void ClearDisplay() override;
void UpdateDisplay() override;
void ReadVRAM(u32 x, u32 y, u32 width, u32 height) override;
void FillVRAM(u32 x, u32 y, u32 width, u32 height, u32 color) override;
void UpdateVRAM(u32 x, u32 y, u32 width, u32 height, const void* data, bool set_mask, bool check_mask) override;
void CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) override;
void UpdateVRAMReadTexture() override;
void UpdateDepthBufferFromMaskBit() override;
void ClearDepthBuffer() override;
void SetScissorFromDrawingArea() override;
void MapBatchVertexPointer(u32 required_vertices) override;
void UnmapBatchVertexPointer(u32 used_vertices) override;
void UploadUniformBuffer(const void* data, u32 data_size) override;
void DrawBatchVertices(BatchRenderMode render_mode, u32 base_vertex, u32 num_vertices) override;
private:
enum : u32
{
MAX_PUSH_CONSTANTS_SIZE = 64,
TEXTURE_REPLACEMENT_BUFFER_SIZE = 64 * 1024 * 1024,
};
void SetCapabilities();
void DestroyResources();
bool CreateRootSignatures();
bool CreateSamplers();
bool CreateFramebuffer();
void ClearFramebuffer();
void DestroyFramebuffer();
bool CreateVertexBuffer();
bool CreateUniformBuffer();
bool CreateTextureBuffer();
bool CompilePipelines();
void DestroyPipelines();
bool CreateTextureReplacementStreamBuffer();
bool BlitVRAMReplacementTexture(const TextureReplacementTexture* tex, u32 dst_x, u32 dst_y, u32 width, u32 height);
ComPtr<ID3D12RootSignature> m_batch_root_signature;
ComPtr<ID3D12RootSignature> m_single_sampler_root_signature;
D3D12::Texture m_vram_texture;
D3D12::Texture m_vram_depth_texture;
D3D12::Texture m_vram_read_texture;
D3D12::Texture m_vram_readback_texture;
D3D12::StagingTexture m_vram_readback_staging_texture;
D3D12::Texture m_display_texture;
D3D12::DescriptorHandle m_point_sampler;
D3D12::DescriptorHandle m_linear_sampler;
D3D12::StreamBuffer m_vertex_stream_buffer;
D3D12::StreamBuffer m_uniform_stream_buffer;
D3D12::StreamBuffer m_texture_stream_buffer;
D3D12::DescriptorHandle m_texture_stream_buffer_srv;
u32 m_current_uniform_buffer_offset = 0;
// [depth_test][render_mode][texture_mode][transparency_mode][dithering][interlacing]
DimensionalArray<ComPtr<ID3D12PipelineState>, 2, 2, 5, 9, 4, 2> m_batch_pipelines;
// [wrapped][interlaced]
DimensionalArray<ComPtr<ID3D12PipelineState>, 2, 2> m_vram_fill_pipelines;
// [depth_test]
std::array<ComPtr<ID3D12PipelineState>, 2> m_vram_write_pipelines;
std::array<ComPtr<ID3D12PipelineState>, 2> m_vram_copy_pipelines;
ComPtr<ID3D12PipelineState> m_vram_readback_pipeline;
ComPtr<ID3D12PipelineState> m_vram_update_depth_pipeline;
// [depth_24][interlace_mode]
DimensionalArray<ComPtr<ID3D12PipelineState>, 3, 2> m_display_pipelines;
ComPtr<ID3D12PipelineState> m_copy_pipeline;
D3D12::Texture m_vram_write_replacement_texture;
D3D12::StreamBuffer m_texture_replacment_stream_buffer;
};

File diff suppressed because it is too large.


@ -1,121 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "common/gl/loader.h"
#include "common/gl/program.h"
#include "common/gl/shader_cache.h"
#include "common/gl/stream_buffer.h"
#include "common/gl/texture.h"
#include "gpu_hw.h"
#include "texture_replacements.h"
#include <array>
#include <memory>
#include <tuple>
class GPU_HW_OpenGL final : public GPU_HW
{
public:
GPU_HW_OpenGL();
~GPU_HW_OpenGL() override;
GPURenderer GetRendererType() const override;
bool Initialize() override;
void Reset(bool clear_vram) override;
bool DoState(StateWrapper& sw, GPUTexture** host_texture, bool update_display) override;
void ResetGraphicsAPIState() override;
void RestoreGraphicsAPIState() override;
void UpdateSettings() override;
protected:
void ClearDisplay() override;
void UpdateDisplay() override;
void ReadVRAM(u32 x, u32 y, u32 width, u32 height) override;
void FillVRAM(u32 x, u32 y, u32 width, u32 height, u32 color) override;
void UpdateVRAM(u32 x, u32 y, u32 width, u32 height, const void* data, bool set_mask, bool check_mask) override;
void CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) override;
void UpdateVRAMReadTexture() override;
void UpdateDepthBufferFromMaskBit() override;
void ClearDepthBuffer() override;
void SetScissorFromDrawingArea() override;
void MapBatchVertexPointer(u32 required_vertices) override;
void UnmapBatchVertexPointer(u32 used_vertices) override;
void UploadUniformBuffer(const void* data, u32 data_size) override;
void DrawBatchVertices(BatchRenderMode render_mode, u32 base_vertex, u32 num_vertices) override;
private:
struct GLStats
{
u32 num_batches;
u32 num_vertices;
u32 num_vram_reads;
u32 num_vram_writes;
u32 num_vram_read_texture_updates;
u32 num_uniform_buffer_updates;
};
ALWAYS_INLINE bool IsGLES() const { return (m_render_api == RenderAPI::OpenGLES); }
void SetCapabilities();
bool CreateFramebuffer();
void ClearFramebuffer();
void CopyFramebufferForState(GLenum target, GLuint src_texture, u32 src_fbo, u32 src_x, u32 src_y, GLuint dst_texture,
u32 dst_fbo, u32 dst_x, u32 dst_y, u32 width, u32 height);
bool CreateVertexBuffer();
bool CreateUniformBuffer();
bool CreateTextureBuffer();
bool CompilePrograms();
void SetDepthFunc();
void SetDepthFunc(GLenum func);
void SetBlendMode();
bool BlitVRAMReplacementTexture(const TextureReplacementTexture* tex, u32 dst_x, u32 dst_y, u32 width, u32 height);
void DownsampleFramebuffer(GL::Texture& source, u32 left, u32 top, u32 width, u32 height);
void DownsampleFramebufferBoxFilter(GL::Texture& source, u32 left, u32 top, u32 width, u32 height);
// downsample texture - used for readbacks at >1xIR.
GL::Texture m_vram_texture;
GL::Texture m_vram_depth_texture;
GL::Texture m_vram_read_texture;
GL::Texture m_vram_encoding_texture;
GL::Texture m_display_texture;
GL::Texture m_vram_write_replacement_texture;
std::unique_ptr<GL::StreamBuffer> m_vertex_stream_buffer;
GLuint m_vram_fbo_id = 0;
GLuint m_vao_id = 0;
GLuint m_attributeless_vao_id = 0;
GLuint m_state_copy_fbo_id = 0;
std::unique_ptr<GL::StreamBuffer> m_uniform_stream_buffer;
std::unique_ptr<GL::StreamBuffer> m_texture_stream_buffer;
GLuint m_texture_buffer_r16ui_texture = 0;
std::array<std::array<std::array<std::array<GL::Program, 2>, 2>, 9>, 4>
m_render_programs; // [render_mode][texture_mode][dithering][interlacing]
std::array<std::array<GL::Program, 3>, 2> m_display_programs; // [depth_24][interlaced]
std::array<std::array<GL::Program, 2>, 2> m_vram_fill_programs;
GL::Program m_vram_read_program;
GL::Program m_vram_write_program;
GL::Program m_vram_copy_program;
GL::Program m_vram_update_depth_program;
u32 m_uniform_buffer_alignment = 1;
u32 m_texture_stream_buffer_size = 0;
bool m_use_texture_buffer_for_vram_writes = false;
bool m_use_ssbo_for_vram_writes = false;
GLenum m_current_depth_test = 0;
GPUTransparencyMode m_current_transparency_mode = GPUTransparencyMode::Disabled;
BatchRenderMode m_current_render_mode = BatchRenderMode::TransparencyDisabled;
GL::Texture m_downsample_texture;
GL::Program m_downsample_program;
};


@ -1162,6 +1162,8 @@ std::string GPU_HW_ShaderGen::GenerateVRAMWriteFragmentShader(bool use_ssbo)
ss << "layout(std430";
if (IsVulkan())
ss << ", set = 0, binding = 0";
else if (IsMetal())
ss << ", set = 0, binding = 1";
else if (m_use_glsl_binding_layout)
ss << ", binding = 0";
@ -1327,13 +1329,37 @@ std::string GPU_HW_ShaderGen::GenerateVRAMUpdateDepthFragmentShader()
return ss.str();
}
void GPU_HW_ShaderGen::WriteAdaptiveDownsampleUniformBuffer(std::stringstream& ss)
{
DeclareUniformBuffer(ss, {"float2 u_uv_min", "float2 u_uv_max", "float2 u_rcp_resolution", "float u_lod"}, true);
}
std::string GPU_HW_ShaderGen::GenerateAdaptiveDownsampleVertexShader()
{
std::stringstream ss;
WriteHeader(ss);
WriteAdaptiveDownsampleUniformBuffer(ss);
DeclareVertexEntryPoint(ss, {}, 0, 1, {}, true);
ss << R"(
{
v_tex0 = float2(float((v_id << 1) & 2u), float(v_id & 2u));
v_pos = float4(v_tex0 * float2(2.0f, -2.0f) + float2(-1.0f, 1.0f), 0.0f, 1.0f);
v_tex0 = u_uv_min + (u_uv_max - u_uv_min) * v_tex0;
#if API_OPENGL || API_OPENGL_ES || API_VULKAN
v_pos.y = -v_pos.y;
#endif
}
)";
return ss.str();
}
std::string GPU_HW_ShaderGen::GenerateAdaptiveDownsampleMipFragmentShader(bool first_pass)
{
std::stringstream ss;
WriteHeader(ss);
WriteCommonFunctions(ss);
WriteAdaptiveDownsampleUniformBuffer(ss);
DeclareTexture(ss, "samp0", 0, false);
DeclareUniformBuffer(ss, {"float2 u_uv_min", "float2 u_uv_max", "float2 u_rcp_resolution"}, true);
DefineMacro(ss, "FIRST_PASS", first_pass);
// mipmap_energy.glsl ported from parallel-rsx.
@ -1368,16 +1394,16 @@ float4 get_bias(float4 c00, float4 c01, float4 c10, float4 c11)
{
float2 uv = v_tex0 - (u_rcp_resolution * 0.25);
#ifdef FIRST_PASS
vec3 c00 = SAMPLE_TEXTURE_OFFSET(samp0, uv, int2(0, 0)).rgb;
vec3 c01 = SAMPLE_TEXTURE_OFFSET(samp0, uv, int2(0, 1)).rgb;
vec3 c10 = SAMPLE_TEXTURE_OFFSET(samp0, uv, int2(1, 0)).rgb;
vec3 c11 = SAMPLE_TEXTURE_OFFSET(samp0, uv, int2(1, 1)).rgb;
vec3 c00 = SAMPLE_TEXTURE_LEVEL_OFFSET(samp0, uv, u_lod, int2(0, 0)).rgb;
vec3 c01 = SAMPLE_TEXTURE_LEVEL_OFFSET(samp0, uv, u_lod, int2(0, 1)).rgb;
vec3 c10 = SAMPLE_TEXTURE_LEVEL_OFFSET(samp0, uv, u_lod, int2(1, 0)).rgb;
vec3 c11 = SAMPLE_TEXTURE_LEVEL_OFFSET(samp0, uv, u_lod, int2(1, 1)).rgb;
o_col0 = get_bias(c00, c01, c10, c11);
#else
vec4 c00 = SAMPLE_TEXTURE_OFFSET(samp0, uv, int2(0, 0));
vec4 c01 = SAMPLE_TEXTURE_OFFSET(samp0, uv, int2(0, 1));
vec4 c10 = SAMPLE_TEXTURE_OFFSET(samp0, uv, int2(1, 0));
vec4 c11 = SAMPLE_TEXTURE_OFFSET(samp0, uv, int2(1, 1));
vec4 c00 = SAMPLE_TEXTURE_LEVEL_OFFSET(samp0, uv, u_lod, int2(0, 0));
vec4 c01 = SAMPLE_TEXTURE_LEVEL_OFFSET(samp0, uv, u_lod, int2(0, 1));
vec4 c10 = SAMPLE_TEXTURE_LEVEL_OFFSET(samp0, uv, u_lod, int2(1, 0));
vec4 c11 = SAMPLE_TEXTURE_LEVEL_OFFSET(samp0, uv, u_lod, int2(1, 1));
o_col0 = get_bias(c00, c01, c10, c11);
#endif
}
@ -1391,9 +1417,8 @@ std::string GPU_HW_ShaderGen::GenerateAdaptiveDownsampleBlurFragmentShader()
std::stringstream ss;
WriteHeader(ss);
WriteCommonFunctions(ss);
WriteAdaptiveDownsampleUniformBuffer(ss);
DeclareTexture(ss, "samp0", 0, false);
DeclareUniformBuffer(ss, {"float2 u_uv_min", "float2 u_uv_max", "float2 u_rcp_resolution", "float sample_level"},
true);
// mipmap_blur.glsl ported from parallel-rsx.
DeclareFragmentEntryPoint(ss, 0, 1, {}, false, 1, false, false, false, false);

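Note (not part of the commit): the new GenerateAdaptiveDownsampleVertexShader() derives a fullscreen triangle from the vertex index alone, so the downsample passes need no vertex buffer. A standalone C++ check of the index-to-position mapping used in the shader body above:

// Verifies the mapping in the shader: v_id = 0,1,2 expands to one oversized triangle.
#include <cstdio>

int main()
{
  for (unsigned v_id = 0; v_id < 3; v_id++)
  {
    const float u = static_cast<float>((v_id << 1) & 2u); // v_tex0.x
    const float v = static_cast<float>(v_id & 2u);        // v_tex0.y
    const float x = u * 2.0f - 1.0f;                      // v_tex0 * float2(2,-2) + float2(-1,1)
    const float y = v * -2.0f + 1.0f;
    std::printf("v_id=%u uv=(%.0f,%.0f) pos=(%.0f,%.0f)\n", v_id, u, v, x, y);
  }
  // Prints (0,0)->(-1,1), (2,0)->(3,1), (0,2)->(-1,-3): a triangle covering the viewport.
  return 0;
}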

@ -24,6 +24,7 @@ public:
std::string GenerateVRAMFillFragmentShader(bool wrapped, bool interlaced);
std::string GenerateVRAMUpdateDepthFragmentShader();
std::string GenerateAdaptiveDownsampleVertexShader();
std::string GenerateAdaptiveDownsampleMipFragmentShader(bool first_pass);
std::string GenerateAdaptiveDownsampleBlurFragmentShader();
std::string GenerateAdaptiveDownsampleCompositeFragmentShader();
@ -36,6 +37,7 @@ private:
void WriteCommonFunctions(std::stringstream& ss);
void WriteBatchUniformBuffer(std::stringstream& ss);
void WriteBatchTextureFilter(std::stringstream& ss, GPUTextureFilter texture_filter);
void WriteAdaptiveDownsampleUniformBuffer(std::stringstream& ss);
u32 m_resolution_scale;
u32 m_multisamples;

File diff suppressed because it is too large.


@ -1,169 +0,0 @@
// SPDX-FileCopyrightText: 2019-2022 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "common/dimensional_array.h"
#include "common/vulkan/stream_buffer.h"
#include "common/vulkan/texture.h"
#include "gpu_hw.h"
#include "texture_replacements.h"
#include <array>
#include <memory>
#include <tuple>
class GPU_HW_Vulkan final : public GPU_HW
{
public:
GPU_HW_Vulkan();
~GPU_HW_Vulkan() override;
GPURenderer GetRendererType() const override;
bool Initialize() override;
void Reset(bool clear_vram) override;
bool DoState(StateWrapper& sw, GPUTexture** host_texture, bool update_display) override;
void ResetGraphicsAPIState() override;
void RestoreGraphicsAPIState() override;
void UpdateSettings() override;
protected:
void ClearDisplay() override;
void UpdateDisplay() override;
void ReadVRAM(u32 x, u32 y, u32 width, u32 height) override;
void FillVRAM(u32 x, u32 y, u32 width, u32 height, u32 color) override;
void UpdateVRAM(u32 x, u32 y, u32 width, u32 height, const void* data, bool set_mask, bool check_mask) override;
void CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32 height) override;
void UpdateVRAMReadTexture() override;
void UpdateDepthBufferFromMaskBit() override;
void ClearDepthBuffer() override;
void SetScissorFromDrawingArea() override;
void MapBatchVertexPointer(u32 required_vertices) override;
void UnmapBatchVertexPointer(u32 used_vertices) override;
void UploadUniformBuffer(const void* data, u32 data_size) override;
void DrawBatchVertices(BatchRenderMode render_mode, u32 base_vertex, u32 num_vertices) override;
private:
enum : u32
{
MAX_PUSH_CONSTANTS_SIZE = 64,
};
void SetCapabilities();
void DestroyResources();
ALWAYS_INLINE bool InRenderPass() const { return (m_current_render_pass != VK_NULL_HANDLE); }
void BeginRenderPass(VkRenderPass render_pass, VkFramebuffer framebuffer, u32 x, u32 y, u32 width, u32 height,
const VkClearValue* clear_value = nullptr);
void BeginVRAMRenderPass();
void EndRenderPass();
void ExecuteCommandBuffer(bool wait_for_completion, bool restore_state);
bool CreatePipelineLayouts();
bool CreateSamplers();
bool CreateFramebuffer();
void ClearFramebuffer();
void DestroyFramebuffer();
bool CreateVertexBuffer();
bool CreateUniformBuffer();
bool CreateTextureBuffer();
bool CompilePipelines();
void DestroyPipelines();
bool BlitVRAMReplacementTexture(const TextureReplacementTexture* tex, u32 dst_x, u32 dst_y, u32 width, u32 height);
void DownsampleFramebuffer(Vulkan::Texture& source, u32 left, u32 top, u32 width, u32 height);
void DownsampleFramebufferBoxFilter(Vulkan::Texture& source, u32 left, u32 top, u32 width, u32 height);
void DownsampleFramebufferAdaptive(Vulkan::Texture& source, u32 left, u32 top, u32 width, u32 height);
VkRenderPass m_current_render_pass = VK_NULL_HANDLE;
VkRenderPass m_vram_render_pass = VK_NULL_HANDLE;
VkRenderPass m_vram_update_depth_render_pass = VK_NULL_HANDLE;
VkRenderPass m_display_load_render_pass = VK_NULL_HANDLE;
VkRenderPass m_display_discard_render_pass = VK_NULL_HANDLE;
VkRenderPass m_vram_readback_render_pass = VK_NULL_HANDLE;
VkDescriptorSetLayout m_batch_descriptor_set_layout = VK_NULL_HANDLE;
VkDescriptorSetLayout m_single_sampler_descriptor_set_layout = VK_NULL_HANDLE;
VkDescriptorSetLayout m_vram_write_descriptor_set_layout = VK_NULL_HANDLE;
VkPipelineLayout m_batch_pipeline_layout = VK_NULL_HANDLE;
VkPipelineLayout m_no_samplers_pipeline_layout = VK_NULL_HANDLE;
VkPipelineLayout m_single_sampler_pipeline_layout = VK_NULL_HANDLE;
VkPipelineLayout m_vram_write_pipeline_layout = VK_NULL_HANDLE;
Vulkan::Texture m_vram_texture;
Vulkan::Texture m_vram_depth_texture;
Vulkan::Texture m_vram_read_texture;
Vulkan::Texture m_vram_readback_texture;
Vulkan::Texture m_display_texture;
bool m_use_ssbos_for_vram_writes = false;
VkFramebuffer m_vram_framebuffer = VK_NULL_HANDLE;
VkFramebuffer m_vram_update_depth_framebuffer = VK_NULL_HANDLE;
VkFramebuffer m_vram_readback_framebuffer = VK_NULL_HANDLE;
VkFramebuffer m_display_framebuffer = VK_NULL_HANDLE;
VkSampler m_point_sampler = VK_NULL_HANDLE;
VkSampler m_linear_sampler = VK_NULL_HANDLE;
VkSampler m_trilinear_sampler = VK_NULL_HANDLE;
VkDescriptorSet m_batch_descriptor_set = VK_NULL_HANDLE;
VkDescriptorSet m_vram_copy_descriptor_set = VK_NULL_HANDLE;
VkDescriptorSet m_vram_read_descriptor_set = VK_NULL_HANDLE;
VkDescriptorSet m_vram_write_descriptor_set = VK_NULL_HANDLE;
VkDescriptorSet m_display_descriptor_set = VK_NULL_HANDLE;
Vulkan::StreamBuffer m_vertex_stream_buffer;
Vulkan::StreamBuffer m_uniform_stream_buffer;
Vulkan::StreamBuffer m_texture_stream_buffer;
u32 m_current_uniform_buffer_offset = 0;
VkBufferView m_texture_stream_buffer_view = VK_NULL_HANDLE;
// [depth_test][render_mode][texture_mode][transparency_mode][dithering][interlacing]
DimensionalArray<VkPipeline, 2, 2, 5, 9, 4, 3> m_batch_pipelines{};
// [wrapped][interlaced]
DimensionalArray<VkPipeline, 2, 2> m_vram_fill_pipelines{};
// [depth_test]
std::array<VkPipeline, 2> m_vram_write_pipelines{};
std::array<VkPipeline, 2> m_vram_copy_pipelines{};
VkPipeline m_vram_readback_pipeline = VK_NULL_HANDLE;
VkPipeline m_vram_update_depth_pipeline = VK_NULL_HANDLE;
// [depth_24][interlace_mode]
DimensionalArray<VkPipeline, 3, 2> m_display_pipelines{};
// texture replacements
Vulkan::Texture m_vram_write_replacement_texture;
// downsampling
Vulkan::Texture m_downsample_texture;
VkRenderPass m_downsample_render_pass = VK_NULL_HANDLE;
Vulkan::Texture m_downsample_weight_texture;
VkRenderPass m_downsample_weight_render_pass = VK_NULL_HANDLE;
VkFramebuffer m_downsample_weight_framebuffer = VK_NULL_HANDLE;
struct SmoothMipView
{
VkImageView image_view = VK_NULL_HANDLE;
VkDescriptorSet descriptor_set = VK_NULL_HANDLE;
VkFramebuffer framebuffer = VK_NULL_HANDLE;
};
std::vector<SmoothMipView> m_downsample_mip_views;
VkPipelineLayout m_downsample_pipeline_layout = VK_NULL_HANDLE;
VkDescriptorSetLayout m_downsample_composite_descriptor_set_layout = VK_NULL_HANDLE;
VkPipelineLayout m_downsample_composite_pipeline_layout = VK_NULL_HANDLE;
VkDescriptorSet m_downsample_composite_descriptor_set = VK_NULL_HANDLE;
VkPipeline m_downsample_first_pass_pipeline = VK_NULL_HANDLE;
VkPipeline m_downsample_mid_pass_pipeline = VK_NULL_HANDLE;
VkPipeline m_downsample_blur_pass_pipeline = VK_NULL_HANDLE;
VkPipeline m_downsample_composite_pass_pipeline = VK_NULL_HANDLE;
};


@ -2,14 +2,18 @@
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "gpu_sw.h"
#include "system.h"
#include "util/gpu_device.h"
#include "common/align.h"
#include "common/assert.h"
#include "common/log.h"
#include "common/make_array.h"
#include "common/platform.h"
#include "util/host_display.h"
#include "system.h"
#include <algorithm>
Log_SetChannel(GPU_SW);
#if defined(CPU_X64)
@ -39,12 +43,7 @@ GPU_SW::GPU_SW()
GPU_SW::~GPU_SW()
{
m_backend.Shutdown();
g_host_display->ClearDisplayTexture();
}
GPURenderer GPU_SW::GetRendererType() const
{
return GPURenderer::Software;
g_gpu_device->ClearDisplayTexture();
}
const Threading::Thread* GPU_SW::GetSWThread() const
@ -52,6 +51,11 @@ const Threading::Thread* GPU_SW::GetSWThread() const
return m_backend.GetThread();
}
bool GPU_SW::IsHardwareRenderer() const
{
return false;
}
bool GPU_SW::Initialize()
{
if (!GPU::Initialize() || !m_backend.Initialize(false))
@ -63,7 +67,7 @@ bool GPU_SW::Initialize()
GPUTexture::Format::RGB565, GPUTexture::Format::RGBA5551);
for (const GPUTexture::Format format : formats_for_16bit)
{
if (g_host_display->SupportsTextureFormat(format))
if (g_gpu_device->SupportsTextureFormat(format))
{
m_16bit_display_format = format;
break;
@ -71,7 +75,7 @@ bool GPU_SW::Initialize()
}
for (const GPUTexture::Format format : formats_for_24bit)
{
if (g_host_display->SupportsTextureFormat(format))
if (g_gpu_device->SupportsTextureFormat(format))
{
m_24bit_display_format = format;
break;
@ -105,9 +109,10 @@ GPUTexture* GPU_SW::GetDisplayTexture(u32 width, u32 height, GPUTexture::Format
if (!m_display_texture || m_display_texture->GetWidth() != width || m_display_texture->GetHeight() != height ||
m_display_texture->GetFormat() != format)
{
g_host_display->ClearDisplayTexture();
g_gpu_device->ClearDisplayTexture();
m_display_texture.reset();
m_display_texture = g_host_display->CreateTexture(width, height, 1, 1, 1, format, nullptr, 0, true);
m_display_texture =
g_gpu_device->CreateTexture(width, height, 1, 1, 1, GPUTexture::Type::Texture, format, nullptr, 0, true);
if (!m_display_texture)
Log_ErrorPrintf("Failed to create %ux%u %u texture", width, height, static_cast<u32>(format));
}
@ -264,7 +269,7 @@ void GPU_SW::CopyOut15Bit(u32 src_x, u32 src_y, u32 width, u32 height, u32 field
if (!interlaced)
{
if (!g_host_display->BeginTextureUpdate(texture, width, height, reinterpret_cast<void**>(&dst_ptr), &dst_stride))
if (!texture->Map(reinterpret_cast<void**>(&dst_ptr), &dst_stride, 0, 0, width, height))
return;
}
else
@ -312,11 +317,11 @@ void GPU_SW::CopyOut15Bit(u32 src_x, u32 src_y, u32 width, u32 height, u32 field
}
if (!interlaced)
g_host_display->EndTextureUpdate(texture, 0, 0, width, height);
texture->Unmap();
else
g_host_display->UpdateTexture(texture, 0, 0, width, height, m_display_texture_buffer.data(), output_stride);
texture->Update(0, 0, width, height, m_display_texture_buffer.data(), output_stride);
g_host_display->SetDisplayTexture(texture, 0, 0, width, height);
g_gpu_device->SetDisplayTexture(texture, 0, 0, width, height);
}
void GPU_SW::CopyOut15Bit(GPUTexture::Format display_format, u32 src_x, u32 src_y, u32 width, u32 height, u32 field,
@ -358,7 +363,7 @@ void GPU_SW::CopyOut24Bit(u32 src_x, u32 src_y, u32 skip_x, u32 width, u32 heigh
if (!interlaced)
{
if (!g_host_display->BeginTextureUpdate(texture, width, height, reinterpret_cast<void**>(&dst_ptr), &dst_stride))
if (!texture->Map(reinterpret_cast<void**>(&dst_ptr), &dst_stride, 0, 0, width, height))
return;
}
else
@ -470,11 +475,11 @@ void GPU_SW::CopyOut24Bit(u32 src_x, u32 src_y, u32 skip_x, u32 width, u32 heigh
}
if (!interlaced)
g_host_display->EndTextureUpdate(texture, 0, 0, width, height);
texture->Unmap();
else
g_host_display->UpdateTexture(texture, 0, 0, width, height, m_display_texture_buffer.data(), output_stride);
texture->Update(0, 0, width, height, m_display_texture_buffer.data(), output_stride);
g_host_display->SetDisplayTexture(texture, 0, 0, width, height);
g_gpu_device->SetDisplayTexture(texture, 0, 0, width, height);
}
void GPU_SW::CopyOut24Bit(GPUTexture::Format display_format, u32 src_x, u32 src_y, u32 skip_x, u32 width, u32 height,
@ -511,14 +516,14 @@ void GPU_SW::UpdateDisplay()
if (!g_settings.debugging.show_vram)
{
g_host_display->SetDisplayParameters(m_crtc_state.display_width, m_crtc_state.display_height,
g_gpu_device->SetDisplayParameters(m_crtc_state.display_width, m_crtc_state.display_height,
m_crtc_state.display_origin_left, m_crtc_state.display_origin_top,
m_crtc_state.display_vram_width, m_crtc_state.display_vram_height,
GetDisplayAspectRatio());
if (IsDisplayDisabled())
{
g_host_display->ClearDisplayTexture();
g_gpu_device->ClearDisplayTexture();
return;
}
@ -559,7 +564,7 @@ void GPU_SW::UpdateDisplay()
else
{
CopyOut15Bit(m_16bit_display_format, 0, 0, VRAM_WIDTH, VRAM_HEIGHT, 0, false, false);
g_host_display->SetDisplayParameters(VRAM_WIDTH, VRAM_HEIGHT, 0, 0, VRAM_WIDTH, VRAM_HEIGHT,
g_gpu_device->SetDisplayParameters(VRAM_WIDTH, VRAM_HEIGHT, 0, 0, VRAM_WIDTH, VRAM_HEIGHT,
static_cast<float>(VRAM_WIDTH) / static_cast<float>(VRAM_HEIGHT));
}
}
@ -894,10 +899,6 @@ void GPU_SW::CopyVRAM(u32 src_x, u32 src_y, u32 dst_x, u32 dst_y, u32 width, u32
std::unique_ptr<GPU> GPU::CreateSoftwareRenderer()
{
// we need something to draw in.. but keep the current api if we have one
if (!g_host_display && !Host::AcquireHostDisplay(HostDisplay::GetPreferredAPI()))
return nullptr;
std::unique_ptr<GPU_SW> gpu(std::make_unique<GPU_SW>());
if (!gpu->Initialize())
return nullptr;

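Note (not part of the commit): the CopyOut paths above swap HostDisplay::BeginTextureUpdate()/EndTextureUpdate() for the texture's own Map()/Unmap(), keeping Update() with the staging buffer for the interlaced case. The condensed shape of the new pattern, using only calls that appear in this diff (the conversion steps are elided):

// Condensed from the CopyOut* paths above; 'texture' is the GPUTexture* being filled.
u8* dst_ptr = nullptr;
u32 dst_stride = 0;
if (!interlaced)
{
  if (!texture->Map(reinterpret_cast<void**>(&dst_ptr), &dst_stride, 0, 0, width, height))
    return;
  // ... convert VRAM rows directly into dst_ptr ...
  texture->Unmap();
}
else
{
  // ... convert into m_display_texture_buffer instead ...
  texture->Update(0, 0, width, height, m_display_texture_buffer.data(), output_stride);
}
g_gpu_device->SetDisplayTexture(texture, 0, 0, width, height);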

@ -2,16 +2,18 @@
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "common/heap_array.h"
#include "gpu.h"
#include "gpu_sw_backend.h"
#include "util/host_display.h"
#include "util/gpu_device.h"
#include "common/heap_array.h"
#include <array>
#include <memory>
#include <vector>
namespace Threading
{
namespace Threading {
class Thread;
}
@ -25,8 +27,8 @@ public:
ALWAYS_INLINE const GPU_SW_Backend& GetBackend() const { return m_backend; }
GPURenderer GetRendererType() const override;
const Threading::Thread* GetSWThread() const override;
bool IsHardwareRenderer() const override;
bool Initialize() override;
bool DoState(StateWrapper& sw, GPUTexture** host_texture, bool update_display) override;


@ -2,11 +2,13 @@
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "gpu_sw_backend.h"
#include "system.h"
#include "util/gpu_device.h"
#include "common/assert.h"
#include "common/log.h"
#include "gpu_sw_backend.h"
#include "util/host_display.h"
#include "system.h"
#include <algorithm>
Log_SetChannel(GPU_SW_Backend);


@ -2,15 +2,19 @@
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "gte.h"
#include "common/assert.h"
#include "common/bitutils.h"
#include "cpu_core.h"
#include "cpu_core_private.h"
#include "util/host_display.h"
#include "pgxp.h"
#include "settings.h"
#include "timing_event.h"
#include "util/gpu_device.h"
#include "util/state_wrapper.h"
#include "common/assert.h"
#include "common/bitutils.h"
#include <algorithm>
#include <array>
#include <numeric>
@ -190,14 +194,14 @@ void UpdateAspectRatio()
{
case DisplayAspectRatio::MatchWindow:
{
if (!g_host_display)
if (!g_gpu_device)
{
s_aspect_ratio = DisplayAspectRatio::R4_3;
return;
}
num = g_host_display->GetWindowWidth();
denom = g_host_display->GetWindowHeight();
num = g_gpu_device->GetWindowWidth();
denom = g_gpu_device->GetWindowHeight();
}
break;


@ -2,15 +2,19 @@
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "guncon.h"
#include "common/assert.h"
#include "common/log.h"
#include "gpu.h"
#include "host.h"
#include "resources.h"
#include "system.h"
#include "util/host_display.h"
#include "util/gpu_device.h"
#include "util/state_wrapper.h"
#include "common/assert.h"
#include "common/log.h"
#include <array>
Log_SetChannel(GunCon);
static constexpr std::array<u8, static_cast<size_t>(GunCon::Button::Count)> s_button_indices = {{13, 3, 14}};
@ -177,8 +181,8 @@ bool GunCon::Transfer(const u8 data_in, u8* data_out)
void GunCon::UpdatePosition()
{
// get screen coordinates
const s32 mouse_x = g_host_display->GetMousePositionX();
const s32 mouse_y = g_host_display->GetMousePositionY();
const s32 mouse_x = g_gpu_device->GetMousePositionX();
const s32 mouse_y = g_gpu_device->GetMousePositionY();
// are we within the active display area?
u32 tick, line;


@ -2,6 +2,12 @@
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "host.h"
#include "common_host.h"
#include "fullscreen_ui.h"
#include "imgui_overlays.h"
#include "util/gpu_device.h"
#include "util/imgui_manager.h"
#include "common/assert.h"
#include "common/heterogeneous_containers.h"
@ -155,3 +161,31 @@ void Host::ReportFormattedDebuggerMessage(const char* format, ...)
ReportDebuggerMessage(message);
}
void Host::RenderDisplay(bool skip_present)
{
Host::BeginPresentFrame();
// acquire for IO.MousePos.
std::atomic_thread_fence(std::memory_order_acquire);
if (!skip_present)
{
FullscreenUI::Render();
ImGuiManager::RenderTextOverlays();
ImGuiManager::RenderOSDMessages();
}
// Debug windows are always rendered, otherwise mouse input breaks on skip.
ImGuiManager::RenderOverlayWindows();
ImGuiManager::RenderDebugWindows();
g_gpu_device->Render(skip_present);
ImGuiManager::NewFrame();
}
void Host::InvalidateDisplay()
{
RenderDisplay(false);
}


@ -101,6 +101,17 @@ void OpenURL(const std::string_view& url);
/// Copies the provided text to the host's clipboard, if present.
bool CopyTextToClipboard(const std::string_view& text);
/// Requests shut down and exit of the hosting application. This may not actually exit,
/// if the user cancels the shutdown confirmation.
void RequestExit(bool allow_confirm);
/// Called before drawing the OSD and other display elements.
void BeginPresentFrame();
/// Provided by the host; renders the display.
void RenderDisplay(bool skip_present);
void InvalidateDisplay();
namespace Internal {
/// Implementation to retrieve a translated string.
s32 GetTranslatedStringImpl(const std::string_view& context, const std::string_view& msg, char* tbuf,

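Note (not part of the commit): host.h above now declares BeginPresentFrame(), RenderDisplay() and InvalidateDisplay(), with RenderDisplay() implemented in host.cpp earlier in this diff. A hedged sketch of how a frontend's present step might drive it; the wrapper function and its flag are assumptions:

// Hypothetical frontend present step; only Host::RenderDisplay() is taken from this diff.
void Frontend_PresentFrame(bool can_skip_present)
{
  // RenderDisplay() calls Host::BeginPresentFrame() itself and still renders the ImGui
  // debug windows when skipping, so mouse input keeps working (see host.cpp above).
  Host::RenderDisplay(can_skip_present);
}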

@ -13,7 +13,7 @@
#include "system.h"
#include "util/audio_stream.h"
#include "util/host_display.h"
#include "util/gpu_device.h"
#include "util/imgui_fullscreen.h"
#include "util/imgui_manager.h"
#include "util/input_manager.h"
@ -240,6 +240,7 @@ void ImGuiManager::DrawPerformanceOverlay()
if (g_settings.display_show_resolution)
{
// TODO: this seems wrong?
const auto [effective_width, effective_height] = g_gpu->GetEffectiveDisplayResolution();
const bool interlaced = g_gpu->IsInterlacedDisplayEnabled();
const bool pal = g_gpu->IsInPALMode();
@ -317,7 +318,7 @@ void ImGuiManager::DrawPerformanceOverlay()
#endif
}
if (g_settings.display_show_gpu && g_host_display->IsGPUTimingEnabled())
if (g_settings.display_show_gpu && g_gpu_device->IsGPUTimingEnabled())
{
text.Assign("GPU: ");
FormatProcessorStat(text, System::GetGPUUsage(), System::GetGPUAverageTime());
@ -411,8 +412,9 @@ void ImGuiManager::DrawPerformanceOverlay()
void ImGuiManager::DrawEnhancementsOverlay()
{
LargeString text;
text.AppendFmtString("{} {}", Settings::GetConsoleRegionName(System::GetRegion()),
Settings::GetRendererName(g_gpu->GetRendererType()));
text.AppendFmtString("{} {}-{}", Settings::GetConsoleRegionName(System::GetRegion()),
GPUDevice::RenderAPIToString(g_gpu_device->GetRenderAPI()),
g_gpu->IsHardwareRenderer() ? "HW" : "SW");
if (g_settings.rewind_enable)
text.AppendFormattedString(" RW=%g/%u", g_settings.rewind_save_frequency, g_settings.rewind_save_slots);
@ -722,19 +724,20 @@ void SaveStateSelectorUI::InitializeListEntry(ListEntry* li, ExtendedSaveStateIn
li->preview_texture.reset();
// Might not have a display yet, we're called at startup..
if (g_host_display)
if (g_gpu_device)
{
if (ssi && !ssi->screenshot_data.empty())
{
li->preview_texture =
g_host_display->CreateTexture(ssi->screenshot_width, ssi->screenshot_height, 1, 1, 1, GPUTexture::Format::RGBA8,
li->preview_texture = g_gpu_device->CreateTexture(
ssi->screenshot_width, ssi->screenshot_height, 1, 1, 1, GPUTexture::Type::Texture, GPUTexture::Format::RGBA8,
ssi->screenshot_data.data(), sizeof(u32) * ssi->screenshot_width, false);
}
else
{
li->preview_texture = g_host_display->CreateTexture(
Resources::PLACEHOLDER_ICON_WIDTH, Resources::PLACEHOLDER_ICON_HEIGHT, 1, 1, 1, GPUTexture::Format::RGBA8,
Resources::PLACEHOLDER_ICON_DATA, sizeof(u32) * Resources::PLACEHOLDER_ICON_WIDTH, false);
li->preview_texture = g_gpu_device->CreateTexture(
Resources::PLACEHOLDER_ICON_WIDTH, Resources::PLACEHOLDER_ICON_HEIGHT, 1, 1, 1, GPUTexture::Type::Texture,
GPUTexture::Format::RGBA8, Resources::PLACEHOLDER_ICON_DATA, sizeof(u32) * Resources::PLACEHOLDER_ICON_WIDTH,
false);
}
if (!li->preview_texture)
@ -751,11 +754,12 @@ void SaveStateSelectorUI::InitializePlaceholderListEntry(ListEntry* li, std::str
li->slot = slot;
li->global = global;
if (g_host_display)
if (g_gpu_device)
{
li->preview_texture = g_host_display->CreateTexture(
Resources::PLACEHOLDER_ICON_WIDTH, Resources::PLACEHOLDER_ICON_HEIGHT, 1, 1, 1, GPUTexture::Format::RGBA8,
Resources::PLACEHOLDER_ICON_DATA, sizeof(u32) * Resources::PLACEHOLDER_ICON_WIDTH, false);
li->preview_texture = g_gpu_device->CreateTexture(
Resources::PLACEHOLDER_ICON_WIDTH, Resources::PLACEHOLDER_ICON_HEIGHT, 1, 1, 1, GPUTexture::Type::Texture,
GPUTexture::Format::RGBA8, Resources::PLACEHOLDER_ICON_DATA, sizeof(u32) * Resources::PLACEHOLDER_ICON_WIDTH,
false);
if (!li->preview_texture)
Log_ErrorPrintf("Failed to upload save state image to GPU");
}
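The calls above illustrate the reworked CreateTexture() signature, which now takes a GPUTexture::Type before the format. A hedged sketch of the same call with placeholder data (parameter meanings are inferred from the calls above; the 32x32 white image and variable names are made up):
// Placeholder 32x32 RGBA8 image.
std::vector<u32> pixels(32 * 32, 0xFFFFFFFFu);
// width, height, layers, levels, samples, type, format, data, data stride, trailing bool as used above.
auto tex = g_gpu_device->CreateTexture(32, 32, 1, 1, 1, GPUTexture::Type::Texture, GPUTexture::Format::RGBA8,
                                       pixels.data(), sizeof(u32) * 32, false);
if (!tex)
  Log_ErrorPrintf("Failed to upload texture to GPU");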

View File

@ -2,22 +2,26 @@
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#include "playstation_mouse.h"
#include "common/assert.h"
#include "common/log.h"
#include "gpu.h"
#include "host.h"
#include "util/host_display.h"
#include "system.h"
#include "util/gpu_device.h"
#include "util/state_wrapper.h"
#include "common/assert.h"
#include "common/log.h"
#include <array>
Log_SetChannel(PlayStationMouse);
static constexpr std::array<u8, static_cast<size_t>(PlayStationMouse::Button::Count)> s_button_indices = {{11, 10}};
PlayStationMouse::PlayStationMouse(u32 index) : Controller(index)
{
m_last_host_position_x = g_host_display->GetMousePositionX();
m_last_host_position_y = g_host_display->GetMousePositionY();
m_last_host_position_x = g_gpu_device->GetMousePositionX();
m_last_host_position_y = g_gpu_device->GetMousePositionY();
}
PlayStationMouse::~PlayStationMouse() = default;
@ -157,8 +161,8 @@ bool PlayStationMouse::Transfer(const u8 data_in, u8* data_out)
void PlayStationMouse::UpdatePosition()
{
// get screen coordinates
const s32 mouse_x = g_host_display->GetMousePositionX();
const s32 mouse_y = g_host_display->GetMousePositionY();
const s32 mouse_x = g_gpu_device->GetMousePositionX();
const s32 mouse_y = g_gpu_device->GetMousePositionY();
const s32 delta_x = mouse_x - m_last_host_position_x;
const s32 delta_y = mouse_y - m_last_host_position_y;
m_last_host_position_x = mouse_x;

View File

@ -3,21 +3,25 @@
#include "settings.h"
#include "achievements.h"
#include "controller.h"
#include "host.h"
#include "host_settings.h"
#include "system.h"
#include "util/gpu_device.h"
#include "common/assert.h"
#include "common/file_system.h"
#include "common/log.h"
#include "common/make_array.h"
#include "common/path.h"
#include "common/string_util.h"
#include "controller.h"
#include "host.h"
#include "host_settings.h"
#include "system.h"
#include "util/host_display.h"
#include <algorithm>
#include <array>
#include <cctype>
#include <numeric>
Log_SetChannel(Settings);
Settings g_settings;
@ -204,6 +208,7 @@ void Settings::Load(SettingsInterface& si)
gpu_resolution_scale = static_cast<u32>(si.GetIntValue("GPU", "ResolutionScale", 1));
gpu_multisamples = static_cast<u32>(si.GetIntValue("GPU", "Multisamples", 1));
gpu_use_debug_device = si.GetBoolValue("GPU", "UseDebugDevice", false);
gpu_disable_shader_cache = si.GetBoolValue("GPU", "DisableShaderCache", false);
gpu_per_sample_shading = si.GetBoolValue("GPU", "PerSampleShading", false);
gpu_use_thread = si.GetBoolValue("GPU", "UseThread", true);
gpu_use_software_renderer_for_readbacks = si.GetBoolValue("GPU", "UseSoftwareRendererForReadbacks", false);
@ -440,6 +445,7 @@ void Settings::Save(SettingsInterface& si) const
si.SetIntValue("GPU", "ResolutionScale", static_cast<long>(gpu_resolution_scale));
si.SetIntValue("GPU", "Multisamples", static_cast<long>(gpu_multisamples));
si.SetBoolValue("GPU", "UseDebugDevice", gpu_use_debug_device);
si.SetBoolValue("GPU", "DisableShaderCache", gpu_disable_shader_cache);
si.SetBoolValue("GPU", "PerSampleShading", gpu_per_sample_shading);
si.SetBoolValue("GPU", "UseThread", gpu_use_thread);
si.SetBoolValue("GPU", "ThreadedPresentation", gpu_threaded_presentation);
@ -877,6 +883,9 @@ static constexpr auto s_gpu_renderer_names = make_array(
#ifdef _WIN32
"D3D11", "D3D12",
#endif
#ifdef __APPLE__
"Metal",
#endif
#ifdef WITH_VULKAN
"Vulkan",
#endif
@ -888,6 +897,9 @@ static constexpr auto s_gpu_renderer_display_names = make_array(
#ifdef _WIN32
TRANSLATE_NOOP("GPURenderer", "Hardware (D3D11)"), TRANSLATE_NOOP("GPURenderer", "Hardware (D3D12)"),
#endif
#ifdef __APPLE__
TRANSLATE_NOOP("GPURenderer", "Hardware (Metal)"),
#endif
#ifdef WITH_VULKAN
TRANSLATE_NOOP("GPURenderer", "Hardware (Vulkan)"),
#endif
@ -930,6 +942,9 @@ RenderAPI Settings::GetRenderAPIForRenderer(GPURenderer renderer)
case GPURenderer::HardwareD3D12:
return RenderAPI::D3D12;
#endif
#ifdef __APPLE__
case GPURenderer::HardwareMetal:
return RenderAPI::Metal;
#endif
#ifdef WITH_VULKAN
case GPURenderer::HardwareVulkan:
return RenderAPI::Vulkan;
@ -940,7 +955,7 @@ RenderAPI Settings::GetRenderAPIForRenderer(GPURenderer renderer)
#endif
case GPURenderer::Software:
default:
return HostDisplay::GetPreferredAPI();
return GPUDevice::GetPreferredAPI();
}
}
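Usage is unchanged apart from the new Metal mapping; a one-line sketch (the g_settings field name is an assumption, the call itself appears in System::CreateGPU later in this diff):
const RenderAPI api = Settings::GetRenderAPIForRenderer(g_settings.gpu_renderer);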
@ -1073,11 +1088,10 @@ float Settings::GetDisplayAspectRatioValue() const
{
case DisplayAspectRatio::MatchWindow:
{
if (!g_host_display)
if (!g_gpu_device)
return s_display_aspect_ratio_values[static_cast<int>(DEFAULT_DISPLAY_ASPECT_RATIO)];
return static_cast<float>(g_host_display->GetWindowWidth()) /
static_cast<float>(g_host_display->GetWindowHeight());
return static_cast<float>(g_gpu_device->GetWindowWidth()) / static_cast<float>(g_gpu_device->GetWindowHeight());
}
case DisplayAspectRatio::Custom:

View File

@ -100,6 +100,7 @@ struct Settings
bool gpu_use_software_renderer_for_readbacks = false;
bool gpu_threaded_presentation = true;
bool gpu_use_debug_device = false;
bool gpu_disable_shader_cache = false;
bool gpu_per_sample_shading = false;
bool gpu_true_color = true;
bool gpu_scaled_dithering = true;
@ -401,7 +402,9 @@ struct Settings
static constexpr GPURenderer DEFAULT_GPU_RENDERER = GPURenderer::HardwareD3D12;
#elif defined(_WIN32)
static constexpr GPURenderer DEFAULT_GPU_RENDERER = GPURenderer::HardwareD3D11;
#elif defined(WITH_OPENGL) && (!defined(__APPLE__) || !defined(WITH_VULKAN))
#elif defined(__APPLE__)
static constexpr GPURenderer DEFAULT_GPU_RENDERER = GPURenderer::HardwareMetal;
#elif defined(WITH_OPENGL)
static constexpr GPURenderer DEFAULT_GPU_RENDERER = GPURenderer::HardwareOpenGL;
#elif defined(WITH_VULKAN)
static constexpr GPURenderer DEFAULT_GPU_RENDERER = GPURenderer::HardwareVulkan;

View File

@ -2,6 +2,6 @@
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)
#pragma once
#include "types.h"
#include "common/types.h"
static constexpr u32 SHADER_CACHE_VERSION = 7;
static constexpr u32 SHADER_CACHE_VERSION = 9;

View File

@ -8,13 +8,6 @@
#include "bus.h"
#include "cdrom.h"
#include "cheats.h"
#include "common/error.h"
#include "common/file_system.h"
#include "common/log.h"
#include "common/make_array.h"
#include "common/path.h"
#include "common/string_util.h"
#include "common/threading.h"
#include "controller.h"
#include "cpu_code_cache.h"
#include "cpu_core.h"
@ -40,13 +33,24 @@
#include "spu.h"
#include "texture_replacements.h"
#include "timers.h"
#include "util/audio_stream.h"
#include "util/cd_image.h"
#include "util/host_display.h"
#include "util/gpu_device.h"
#include "util/ini_settings_interface.h"
#include "util/iso_reader.h"
#include "util/state_wrapper.h"
#include "common/error.h"
#include "common/file_system.h"
#include "common/log.h"
#include "common/make_array.h"
#include "common/path.h"
#include "common/string_util.h"
#include "common/threading.h"
#include "xxhash.h"
#include <cctype>
#include <cinttypes>
#include <cmath>
@ -55,6 +59,7 @@
#include <fstream>
#include <limits>
#include <thread>
Log_SetChannel(System);
#ifdef _WIN32
@ -70,7 +75,9 @@ SystemBootParameters::SystemBootParameters(const SystemBootParameters&) = defaul
SystemBootParameters::SystemBootParameters(SystemBootParameters&& other) = default;
SystemBootParameters::SystemBootParameters(std::string filename_) : filename(std::move(filename_)) {}
SystemBootParameters::SystemBootParameters(std::string filename_) : filename(std::move(filename_))
{
}
SystemBootParameters::~SystemBootParameters() = default;
@ -135,6 +142,7 @@ static std::string s_input_profile_name;
static System::State s_state = System::State::Shutdown;
static std::atomic_bool s_startup_cancelled{false};
static bool s_keep_gpu_device_on_shutdown = false;
static ConsoleRegion s_region = ConsoleRegion::NTSC_U;
TickCount System::g_ticks_per_second = System::MASTER_CLOCK;
@ -799,12 +807,10 @@ bool System::RecreateGPU(GPURenderer renderer, bool force_recreate_display, bool
if (!state_valid)
Log_ErrorPrintf("Failed to save old GPU state when switching renderers");
g_gpu->ResetGraphicsAPIState();
// create new renderer
g_gpu.reset();
if (force_recreate_display)
Host::ReleaseHostDisplay();
Host::ReleaseGPUDevice();
if (!CreateGPU(renderer))
{
@ -822,7 +828,6 @@ bool System::RecreateGPU(GPURenderer renderer, bool force_recreate_display, bool
g_gpu->RestoreGraphicsAPIState();
g_gpu->DoState(sw, nullptr, update_display);
TimingEvents::DoState(sw);
g_gpu->ResetGraphicsAPIState();
}
// fix up vsync etc
@ -1062,6 +1067,7 @@ bool System::LoadState(const char* filename)
ResetPerformanceCounters();
ResetThrottler();
Host::RenderDisplay(false);
g_gpu->RestoreGraphicsAPIState();
Log_VerbosePrintf("Loading state took %.2f msec", load_timer.GetTimeMilliseconds());
return true;
}
@ -1135,6 +1141,7 @@ bool System::BootSystem(SystemBootParameters parameters)
Assert(s_state == State::Shutdown);
s_state = State::Starting;
s_startup_cancelled.store(false);
s_keep_gpu_device_on_shutdown = static_cast<bool>(g_gpu_device);
s_region = g_settings.region;
Host::OnSystemStarting();
@ -1437,7 +1444,11 @@ bool System::Initialize(bool force_software_renderer)
if (IsStartupCancelled())
{
g_gpu.reset();
Host::ReleaseHostDisplay();
if (!s_keep_gpu_device_on_shutdown)
{
Host::ReleaseGPUDevice();
Host::ReleaseRenderWindow();
}
if (g_settings.gpu_pgxp_enable)
PGXP::Shutdown();
CPU::Shutdown();
@ -1519,8 +1530,6 @@ void System::DestroySystem()
Timers::Shutdown();
Pad::Shutdown();
CDROM::Shutdown();
if (g_gpu)
g_gpu->ResetGraphicsAPIState();
g_gpu.reset();
InterruptController::Shutdown();
DMA::Shutdown();
@ -1532,11 +1541,15 @@ void System::DestroySystem()
ClearRunningGame();
// Restore present-all-frames behavior.
if (g_host_display)
if (s_keep_gpu_device_on_shutdown && g_gpu_device)
{
g_host_display->SetDisplayMaxFPS(0.0f);
g_gpu_device->SetDisplayMaxFPS(0.0f);
UpdateSoftwareCursor();
Host::ReleaseHostDisplay();
}
else
{
Host::ReleaseGPUDevice();
Host::ReleaseRenderWindow();
}
s_bios_hash = {};
@ -1601,8 +1614,6 @@ void System::Execute()
else
CPU::Execute();
g_gpu->ResetGraphicsAPIState();
s_system_executing = false;
continue;
}
@ -1624,6 +1635,9 @@ void System::FrameDone()
{
s_frame_number++;
// Vertex buffer is shared, need to flush what we have.
g_gpu->FlushRender();
// Generate any pending samples from the SPU before sleeping, this way we reduce the chances of underruns.
SPU::GeneratePendingSamples();
@ -1680,14 +1694,11 @@ void System::FrameDone()
{
s_last_frame_skipped = false;
// TODO: Purge reset/restore
g_gpu->ResetGraphicsAPIState();
const bool skip_present = g_host_display->ShouldSkipDisplayingFrame();
const bool skip_present = g_gpu_device->ShouldSkipDisplayingFrame();
Host::RenderDisplay(skip_present);
if (!skip_present && g_host_display->IsGPUTimingEnabled())
if (!skip_present && g_gpu_device->IsGPUTimingEnabled())
{
s_accumulated_gpu_time += g_host_display->GetAndResetAccumulatedGPUTime();
s_accumulated_gpu_time += g_gpu_device->GetAndResetAccumulatedGPUTime();
s_presents_since_last_update++;
}
@ -1784,10 +1795,9 @@ void System::SingleStepCPU()
CPU::SingleStep();
g_gpu->FlushRender();
SPU::GeneratePendingSamples();
g_gpu->ResetGraphicsAPIState();
s_system_executing = false;
}
@ -1834,35 +1844,29 @@ void System::RecreateSystem()
bool System::CreateGPU(GPURenderer renderer)
{
switch (renderer)
const RenderAPI api = Settings::GetRenderAPIForRenderer(renderer);
if (!g_gpu_device || (renderer != GPURenderer::Software && g_gpu_device->GetRenderAPI() != api))
{
#ifdef WITH_OPENGL
case GPURenderer::HardwareOpenGL:
g_gpu = GPU::CreateHardwareOpenGLRenderer();
break;
#endif
#ifdef WITH_VULKAN
case GPURenderer::HardwareVulkan:
g_gpu = GPU::CreateHardwareVulkanRenderer();
break;
#endif
#ifdef _WIN32
case GPURenderer::HardwareD3D11:
g_gpu = GPU::CreateHardwareD3D11Renderer();
break;
case GPURenderer::HardwareD3D12:
g_gpu = GPU::CreateHardwareD3D12Renderer();
break;
#endif
case GPURenderer::Software:
default:
g_gpu = GPU::CreateSoftwareRenderer();
break;
if (g_gpu_device)
{
Log_WarningPrintf("Recreating GPU device, expecting %s got %s", GPUDevice::RenderAPIToString(api),
GPUDevice::RenderAPIToString(g_gpu_device->GetRenderAPI()));
}
Host::ReleaseGPUDevice();
if (!Host::CreateGPUDevice(api))
{
Host::ReleaseRenderWindow();
return false;
}
}
if (renderer == GPURenderer::Software)
g_gpu = GPU::CreateSoftwareRenderer();
else
g_gpu = GPU::CreateHardwareRenderer();
if (!g_gpu)
{
Log_ErrorPrintf("Failed to initialize %s renderer, falling back to software renderer",
@ -1875,6 +1879,11 @@ bool System::CreateGPU(GPURenderer renderer)
if (!g_gpu)
{
Log_ErrorPrintf("Failed to create fallback software renderer.");
if (!s_keep_gpu_device_on_shutdown)
{
Host::ReleaseGPUDevice();
Host::ReleaseRenderWindow();
}
return false;
}
}
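Condensed, the logic above only tears the device down when the required API actually changes; a hedged restatement of that check (names match the hunk above, not a drop-in replacement):
const RenderAPI api = Settings::GetRenderAPIForRenderer(renderer);
if (!g_gpu_device || (renderer != GPURenderer::Software && g_gpu_device->GetRenderAPI() != api))
{
  // Same sequence as above: drop the old device, then ask the host for one
  // backed by the required API, releasing the render window on failure.
  Host::ReleaseGPUDevice();
  if (!Host::CreateGPUDevice(api))
  {
    Host::ReleaseRenderWindow();
    return false;
  }
}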
@ -1934,9 +1943,7 @@ bool System::DoState(StateWrapper& sw, GPUTexture** host_texture, bool update_di
return false;
g_gpu->RestoreGraphicsAPIState();
const bool gpu_result = sw.DoMarker("GPU") && g_gpu->DoState(sw, host_texture, update_display);
g_gpu->ResetGraphicsAPIState();
if (!gpu_result)
if (!sw.DoMarker("GPU") || !g_gpu->DoState(sw, host_texture, update_display))
return false;
if (!sw.DoMarker("CDROM") || !CDROM::DoState(sw))
@ -2071,8 +2078,6 @@ void System::InternalReset()
#ifdef WITH_CHEEVOS
Achievements::ResetRuntime();
#endif
g_gpu->ResetGraphicsAPIState();
}
std::string System::GetMediaPathFromSaveState(const char* path)
@ -2283,7 +2288,7 @@ bool System::InternalSaveState(ByteStream* state, u32 screenshot_size /* = 256 *
if (screenshot_size > 0)
{
// assume this size is the width
const float display_aspect_ratio = g_host_display->GetDisplayAspectRatio();
const float display_aspect_ratio = g_gpu_device->GetDisplayAspectRatio();
const u32 screenshot_width = screenshot_size;
const u32 screenshot_height =
std::max(1u, static_cast<u32>(static_cast<float>(screenshot_width) /
@ -2293,7 +2298,7 @@ bool System::InternalSaveState(ByteStream* state, u32 screenshot_size /* = 256 *
std::vector<u32> screenshot_buffer;
u32 screenshot_stride;
GPUTexture::Format screenshot_format;
if (g_host_display->RenderScreenshot(screenshot_width, screenshot_height,
if (g_gpu_device->RenderScreenshot(screenshot_width, screenshot_height,
Common::Rectangle<s32>::FromExtents(0, 0, screenshot_width, screenshot_height),
&screenshot_buffer, &screenshot_stride, &screenshot_format) &&
GPUTexture::ConvertTextureDataToRGBA8(screenshot_width, screenshot_height, screenshot_buffer, screenshot_stride,
@ -2306,7 +2311,7 @@ bool System::InternalSaveState(ByteStream* state, u32 screenshot_size /* = 256 *
}
else
{
if (g_host_display->UsesLowerLeftOrigin())
if (g_gpu_device->UsesLowerLeftOrigin())
{
GPUTexture::FlipTextureDataRGBA8(screenshot_width, screenshot_height, screenshot_buffer, screenshot_stride);
}
@ -2350,8 +2355,6 @@ bool System::InternalSaveState(ByteStream* state, u32 screenshot_size /* = 256 *
header.data_compressed_size = static_cast<u32>(state->GetPosition() - header.offset_to_data);
}
g_gpu->ResetGraphicsAPIState();
if (!result)
return false;
}
@ -2427,7 +2430,7 @@ void System::UpdatePerformanceCounters()
s_fps_timer.ResetTo(now_ticks);
if (g_host_display->IsGPUTimingEnabled())
if (g_gpu_device->IsGPUTimingEnabled())
{
s_average_gpu_time = s_accumulated_gpu_time / static_cast<float>(std::max(s_presents_since_last_update, 1u));
s_gpu_usage = s_accumulated_gpu_time / (time * 10.0f);
@ -2474,7 +2477,7 @@ void System::UpdateSpeedLimiterState()
s_target_speed == 1.0f && IsValid())
{
float host_refresh_rate;
if (g_host_display->GetHostRefreshRate(&host_refresh_rate))
if (g_gpu_device->GetHostRefreshRate(&host_refresh_rate))
{
const float ratio = host_refresh_rate / System::GetThrottleFrequency();
s_syncing_to_host = (ratio >= 0.95f && ratio <= 1.05f);
@ -2530,8 +2533,8 @@ void System::UpdateDisplaySync()
Log_VerbosePrintf("Max display fps: %f (%s)", max_display_fps,
s_display_all_frames ? "displaying all frames" : "skipping displaying frames when needed");
g_host_display->SetDisplayMaxFPS(max_display_fps);
g_host_display->SetVSync(video_sync_enabled);
g_gpu_device->SetDisplayMaxFPS(max_display_fps);
g_gpu_device->SetVSync(video_sync_enabled);
}
bool System::ShouldUseVSync()
@ -3038,10 +3041,7 @@ bool System::DumpVRAM(const char* filename)
return false;
g_gpu->RestoreGraphicsAPIState();
const bool result = g_gpu->DumpVRAMToFile(filename);
g_gpu->ResetGraphicsAPIState();
return result;
return g_gpu->DumpVRAMToFile(filename);
}
bool System::DumpSPURAM(const char* filename)
@ -3492,12 +3492,12 @@ void System::CheckForSettingsChanges(const Settings& old_settings)
{
if (g_settings.display_post_processing && !g_settings.display_post_process_chain.empty())
{
if (!g_host_display->SetPostProcessingChain(g_settings.display_post_process_chain))
if (!g_gpu_device->SetPostProcessingChain(g_settings.display_post_process_chain))
Host::AddOSDMessage(TRANSLATE_STR("OSDMessage", "Failed to load post processing shader chain."), 20.0f);
}
else
{
g_host_display->SetPostProcessingChain({});
g_gpu_device->SetPostProcessingChain({});
}
}
}
@ -3708,8 +3708,6 @@ void System::DoRewind()
s_next_frame_time += s_frame_period;
// TODO: Purge reset/restore
g_gpu->ResetGraphicsAPIState();
Host::RenderDisplay(false);
g_gpu->RestoreGraphicsAPIState();
@ -3989,7 +3987,7 @@ bool System::SaveScreenshot(const char* filename /* = nullptr */, bool full_reso
return false;
}
const bool screenshot_saved = g_host_display->WriteScreenshotToFile(
const bool screenshot_saved = g_gpu_device->WriteScreenshotToFile(
filename, g_settings.display_internal_resolution_screenshots, compress_on_thread);
if (!screenshot_saved)
@ -4340,13 +4338,13 @@ void System::TogglePostProcessing()
{
Host::AddKeyedOSDMessage("PostProcessing", TRANSLATE_STR("OSDMessage", "Post-processing is now enabled."), 10.0f);
if (!g_host_display->SetPostProcessingChain(g_settings.display_post_process_chain))
if (!g_gpu_device->SetPostProcessingChain(g_settings.display_post_process_chain))
Host::AddOSDMessage(TRANSLATE_STR("OSDMessage", "Failed to load post processing shader chain."), 20.0f);
}
else
{
Host::AddKeyedOSDMessage("PostProcessing", TRANSLATE_STR("OSDMessage", "Post-processing is now disabled."), 10.0f);
g_host_display->SetPostProcessingChain({});
g_gpu_device->SetPostProcessingChain({});
}
}
@ -4355,7 +4353,7 @@ void System::ReloadPostProcessingShaders()
if (!IsValid() || !g_settings.display_post_processing)
return;
if (!g_host_display->SetPostProcessingChain(g_settings.display_post_process_chain))
if (!g_gpu_device->SetPostProcessingChain(g_settings.display_post_process_chain))
Host::AddOSDMessage(TRANSLATE_STR("OSDMessage", "Failed to load post-processing shader chain."), 20.0f);
else
Host::AddOSDMessage(TRANSLATE_STR("OSDMessage", "Post-processing shaders reloaded."), 10.0f);
@ -4421,7 +4419,7 @@ void System::UpdateSoftwareCursor()
if (!IsValid())
{
Host::SetMouseMode(false, false);
g_host_display->ClearSoftwareCursor();
g_gpu_device->ClearSoftwareCursor();
return;
}
@ -4444,12 +4442,12 @@ void System::UpdateSoftwareCursor()
if (image && image->IsValid())
{
g_host_display->SetSoftwareCursor(image->GetPixels(), image->GetWidth(), image->GetHeight(), image->GetPitch(),
g_gpu_device->SetSoftwareCursor(image->GetPixels(), image->GetWidth(), image->GetHeight(), image->GetPitch(),
image_scale);
}
else
{
g_host_display->ClearSoftwareCursor();
g_gpu_device->ClearSoftwareCursor();
}
}
@ -4462,13 +4460,13 @@ void System::RequestDisplaySize(float scale /*= 0.0f*/)
scale = g_gpu->IsHardwareRenderer() ? static_cast<float>(g_settings.gpu_resolution_scale) : 1.0f;
const float y_scale =
(static_cast<float>(g_host_display->GetDisplayWidth()) / static_cast<float>(g_host_display->GetDisplayHeight())) /
g_host_display->GetDisplayAspectRatio();
(static_cast<float>(g_gpu_device->GetDisplayWidth()) / static_cast<float>(g_gpu_device->GetDisplayHeight())) /
g_gpu_device->GetDisplayAspectRatio();
const u32 requested_width =
std::max<u32>(static_cast<u32>(std::ceil(static_cast<float>(g_host_display->GetDisplayWidth()) * scale)), 1);
std::max<u32>(static_cast<u32>(std::ceil(static_cast<float>(g_gpu_device->GetDisplayWidth()) * scale)), 1);
const u32 requested_height = std::max<u32>(
static_cast<u32>(std::ceil(static_cast<float>(g_host_display->GetDisplayHeight()) * y_scale * scale)), 1);
static_cast<u32>(std::ceil(static_cast<float>(g_gpu_device->GetDisplayHeight()) * y_scale * scale)), 1);
Host::RequestResizeHostDisplay(static_cast<s32>(requested_width), static_cast<s32>(requested_height));
}

View File

@ -489,16 +489,18 @@ void PumpMessagesOnCPUThread();
/// Requests a specific display window size.
void RequestResizeHostDisplay(s32 width, s32 height);
/// Requests shut down and exit of the hosting application. This may not actually exit,
/// if the user cancels the shutdown confirmation.
void RequestExit(bool allow_confirm);
/// Requests shut down of the current virtual machine.
void RequestSystemShutdown(bool allow_confirm, bool save_state);
/// Returns true if the hosting application is currently fullscreen.
bool IsFullscreen();
/// Attempts to create the rendering device backend.
bool CreateGPUDevice(RenderAPI api);
/// Alters fullscreen state of hosting application.
void SetFullscreen(bool enabled);
/// Handles fullscreen transitions and such.
void UpdateDisplayWindow();
/// Called when the window is resized.
void ResizeDisplayWindow(s32 width, s32 height, float scale);
/// Destroys any active rendering device.
void ReleaseGPUDevice();
} // namespace Host

View File

@ -62,6 +62,9 @@ enum class GPURenderer : u8
HardwareD3D11,
HardwareD3D12,
#endif
#ifdef __APPLE__
HardwareMetal,
#endif
#ifdef WITH_VULKAN
HardwareVulkan,
#endif
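This enum is mirrored by the renderer-name tables in settings.cpp earlier in the diff, so each #ifdef block must appear in the same order in all of them. A quick lookup sketch (Settings::GetRendererName() appears elsewhere in this diff; GetRendererDisplayName() is assumed to be its display-name counterpart):
// Only valid in builds where __APPLE__ is defined, since HardwareMetal is guarded above.
const char* short_name = Settings::GetRendererName(GPURenderer::HardwareMetal);       // "Metal"
const char* long_name = Settings::GetRendererDisplayName(GPURenderer::HardwareMetal); // "Hardware (Metal)"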

View File

@ -67,14 +67,12 @@ if(USE_WAYLAND)
)
endif()
if(USE_DRMKMS AND USE_EVDEV)
message(STATUS "Building VTY/DRM/KMS/EVDev NoGUI Platform.")
target_compile_definitions(duckstation-nogui PRIVATE "NOGUI_PLATFORM_VTY=1" "WITH_DRMKMS=1")
if(APPLE)
message(STATUS "Building Cocoa NoGUI Platform.")
target_sources(duckstation-nogui PRIVATE
vty_key_names.h
vty_nogui_platform.cpp
vty_nogui_platform.h
cocoa_key_names.h
cocoa_nogui_platform.mm
cocoa_nogui_platform.h
)
target_include_directories(duckstation-nogui PRIVATE ${LIBEVDEV_INCLUDE_DIRS})
target_link_libraries(duckstation-nogui PRIVATE ${LIBEVDEV_LIBRARIES})
endif()

View File

@ -0,0 +1,133 @@
#pragma once
#include "common/types.h"
#include <array>
#include <cstring>
#include <map>
#include <optional>
#include <string_view>
#include <Carbon/Carbon.h>
namespace CocoaKeyNames {
static const std::map<int, const char*> s_cocoa_key_names = {
{kVK_Return, "Return"},
{kVK_Escape, "Escape"},
{kVK_Delete, "Backspace"},
{kVK_Tab, "Tab"},
{kVK_Space, "Space"},
{kVK_ANSI_Quote, "Quote"},
{kVK_ANSI_Comma, "Comma"},
{kVK_ANSI_Minus, "Minus"},
{kVK_ANSI_Period, "Period"},
{kVK_ANSI_Slash, "Slash"},
{kVK_ANSI_0, "0"},
{kVK_ANSI_1, "1"},
{kVK_ANSI_2, "2"},
{kVK_ANSI_3, "3"},
{kVK_ANSI_4, "4"},
{kVK_ANSI_5, "5"},
{kVK_ANSI_6, "6"},
{kVK_ANSI_7, "7"},
{kVK_ANSI_8, "8"},
{kVK_ANSI_9, "9"},
{kVK_ANSI_Semicolon, "Semicolon"},
{kVK_ANSI_Equal, "Equal"},
{kVK_ANSI_LeftBracket, "BracketLeft"},
{kVK_ANSI_Backslash, "Backslash"},
{kVK_ANSI_RightBracket, "BracketRight"},
{kVK_ANSI_Grave, "Grave"},
{kVK_ANSI_A, "A"},
{kVK_ANSI_B, "B"},
{kVK_ANSI_C, "C"},
{kVK_ANSI_D, "D"},
{kVK_ANSI_E, "E"},
{kVK_ANSI_F, "F"},
{kVK_ANSI_G, "G"},
{kVK_ANSI_H, "H"},
{kVK_ANSI_I, "I"},
{kVK_ANSI_J, "J"},
{kVK_ANSI_K, "K"},
{kVK_ANSI_L, "L"},
{kVK_ANSI_M, "M"},
{kVK_ANSI_N, "N"},
{kVK_ANSI_O, "O"},
{kVK_ANSI_P, "P"},
{kVK_ANSI_Q, "Q"},
{kVK_ANSI_R, "R"},
{kVK_ANSI_S, "S"},
{kVK_ANSI_T, "T"},
{kVK_ANSI_U, "U"},
{kVK_ANSI_V, "V"},
{kVK_ANSI_W, "W"},
{kVK_ANSI_X, "X"},
{kVK_ANSI_Y, "Y"},
{kVK_ANSI_Z, "Z"},
{kVK_CapsLock, "CapsLock"},
{kVK_F1, "F1"},
{kVK_F2, "F2"},
{kVK_F3, "F3"},
{kVK_F4, "F4"},
{kVK_F5, "F5"},
{kVK_F6, "F6"},
{kVK_F7, "F7"},
{kVK_F8, "F8"},
{kVK_F9, "F9"},
{kVK_F10, "F10"},
{kVK_F11, "F11"},
{kVK_F12, "F12"},
{kVK_Home, "Home"},
{kVK_PageUp, "PageUp"},
{kVK_End, "End"},
{kVK_PageDown, "PageDown"},
{kVK_RightArrow, "Right"},
{kVK_LeftArrow, "Left"},
{kVK_DownArrow, "Down"},
{kVK_UpArrow, "Up"},
{kVK_ANSI_KeypadDivide, "KeypadDivide"},
{kVK_ANSI_KeypadMultiply, "KeypadMultiply"},
{kVK_ANSI_KeypadMinus, "KeypadMinus"},
{kVK_ANSI_KeypadPlus, "KeypadPlus"},
{kVK_ANSI_KeypadEnter, "KeypadReturn"},
{kVK_ANSI_Keypad1, "Keypad1"},
{kVK_ANSI_Keypad2, "Keypad2"},
{kVK_ANSI_Keypad3, "Keypad3"},
{kVK_ANSI_Keypad4, "Keypad4"},
{kVK_ANSI_Keypad5, "Keypad5"},
{kVK_ANSI_Keypad6, "Keypad6"},
{kVK_ANSI_Keypad7, "Keypad7"},
{kVK_ANSI_Keypad8, "Keypad8"},
{kVK_ANSI_Keypad9, "Keypad9"},
{kVK_ANSI_Keypad0, "Keypad0"},
{kVK_ANSI_KeypadDecimal, "KeypadPeriod"},
{kVK_F13, "F13"},
{kVK_F14, "F14"},
{kVK_F15, "F15"},
{kVK_F16, "F16"},
{kVK_F17, "F17"},
{kVK_F18, "F18"},
{kVK_F19, "F19"},
{kVK_F20, "F20"},
{kVK_Help, "Help"},
{kVK_Option, "Alt"},
{kVK_Command, "Super"},
{kVK_Function, "Control"},
};
static const char* GetKeyName(unsigned short key)
{
const auto it = s_cocoa_key_names.find(key);
return it == s_cocoa_key_names.end() ? nullptr : it->second;
}
static std::optional<unsigned short> GetKeyCodeForName(const std::string_view& key_name)
{
for (const auto& it : s_cocoa_key_names)
{
if (key_name == it.second)
return it.first;
}
return std::nullopt;
}
} // namespace CocoaKeyNames
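A hedged usage sketch for the new key-name table (the keycode would come from -[NSEvent keyCode] in the new cocoa_nogui_platform.mm; the function below and its logging are illustrative only):
#include <cstdio>
static void DumpHostKeyEvent(unsigned short keycode, bool pressed)
{
  // GetKeyName() returns nullptr for unmapped virtual key codes.
  if (const char* name = CocoaKeyNames::GetKeyName(keycode))
    std::printf("Key %s %s\n", name, pressed ? "pressed" : "released");
}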

Some files were not shown because too many files have changed in this diff.