Merge pull request #16420 from hrydgard/gpu-features-debug

Add a way to view GPU_USE flags at runtime
This commit is contained in:
Henrik Rydgård 2022-11-23 17:00:33 +01:00 committed by GitHub
commit 2e492bf333
9 changed files with 90 additions and 3 deletions

View File

@@ -76,6 +76,9 @@ inline bool DataFormatIsColor(DataFormat fmt) {
return !DataFormatIsDepthStencil(fmt);
}
// Limited format support for now.
const char *DataFormatToString(DataFormat fmt);
void ConvertFromRGBA8888(uint8_t *dst, const uint8_t *src, uint32_t dstStride, uint32_t srcStride, uint32_t width, uint32_t height, DataFormat format);
void ConvertFromBGRA8888(uint8_t *dst, const uint8_t *src, uint32_t dstStride, uint32_t srcStride, uint32_t width, uint32_t height, DataFormat format);
void ConvertToD32F(uint8_t *dst, const uint8_t *src, uint32_t dstStride, uint32_t srcStride, uint32_t width, uint32_t height, DataFormat format);

View File

@@ -525,7 +525,6 @@ static bool HasIntelDualSrcBug(int versions[4]) {
}
OpenGLContext::OpenGLContext() {
// TODO: Detect more caps
if (gl_extensions.IsGLES) {
if (gl_extensions.OES_packed_depth_stencil || gl_extensions.OES_depth24) {
caps_.preferredDepthBufferFormat = DataFormat::D24_S8;

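The hunk above only shows the GLES half of the depth-format decision. As a rough sketch of the pattern (the D16 fallback and the non-GLES branch are assumptions, not part of this diff, and ChoosePreferredDepthFormat is a made-up name; it assumes the same includes as OpenGLContext.cpp):

// Sketch only: prefer packed 24-bit depth + 8-bit stencil when the GLES extensions
// advertise it, otherwise fall back to 16-bit depth.
DataFormat ChoosePreferredDepthFormat(bool isGLES, bool packedDepthStencil, bool depth24) {
	if (isGLES) {
		if (packedDepthStencil || depth24) {
			return DataFormat::D24_S8;
		}
		return DataFormat::D16;  // assumed conservative fallback, not shown in the hunk
	}
	return DataFormat::D24_S8;  // desktop GL: 24-bit depth is assumed to be available
}
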
View File

@@ -56,6 +56,27 @@ size_t DataFormatSizeInBytes(DataFormat fmt) {
}
}
const char *DataFormatToString(DataFormat fmt) {
switch (fmt) {
case DataFormat::R8_UNORM: return "R8_UNORM";
case DataFormat::R8G8_UNORM: return "R8G8_UNORM";
case DataFormat::R8G8B8A8_UNORM: return "R8G8B8A8_UNORM";
case DataFormat::B8G8R8A8_UNORM: return "B8G8R8A8_UNORM";
case DataFormat::R16_UNORM: return "R16_UNORM";
case DataFormat::R16_FLOAT: return "R16_FLOAT";
case DataFormat::R32_FLOAT: return "R32_FLOAT";
case DataFormat::S8: return "S8";
case DataFormat::D16: return "D16";
case DataFormat::D24_S8: return "D24_S8";
case DataFormat::D32F: return "D32F";
case DataFormat::D32F_S8: return "D32F_S8";
default:
return "(N/A)";
}
}
bool DataFormatIsDepthStencil(DataFormat fmt) {
switch (fmt) {
case DataFormat::D16:

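For reference, a hypothetical caller of the new DataFormatToString() could log the backend's preferred depth format in readable form; the System Info change at the end of this PR does essentially the same thing with an InfoItem instead of a log line. LogPreferredDepthFormat is a made-up name, and the snippet assumes the usual Common/Log.h and thin3d.h includes:

// Hypothetical diagnostic helper, not part of this PR.
void LogPreferredDepthFormat(Draw::DrawContext *draw) {
	Draw::DataFormat fmt = draw->GetDeviceCaps().preferredDepthBufferFormat;
	INFO_LOG(G3D, "Preferred depth buffer format: %s", Draw::DataFormatToString(fmt));
}
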
View File

@@ -256,7 +256,7 @@ std::string FragmentShaderDesc(const FShaderID &id) {
else if (id.Bit(FS_BIT_COLOR_TEST)) desc << "ColorTest " << alphaTestFuncs[id.Bits(FS_BIT_COLOR_TEST_FUNC, 2)] << " "; // first 4 match
if (id.Bit(FS_BIT_TEST_DISCARD_TO_ZERO)) desc << "TestDiscardToZero ";
if (id.Bit(FS_BIT_NO_DEPTH_CANNOT_DISCARD_STENCIL)) desc << "StencilDiscardWorkaround ";
if (id.Bits(FS_BIT_REPLACE_LOGIC_OP, 4) != GE_LOGIC_COPY) desc << "ReplaceLogic ";
if ((id.Bits(FS_BIT_REPLACE_LOGIC_OP, 4) != GE_LOGIC_COPY) && !id.Bit(FS_BIT_CLEARMODE)) desc << "ReplaceLogic ";
if (id.Bit(FS_BIT_SAMPLE_ARRAY_TEXTURE)) desc << "TexArray ";
if (id.Bit(FS_BIT_STEREO)) desc << "Stereo ";
return desc.str();

View File

@@ -300,8 +300,8 @@ bool GenerateVertexShader(const VShaderID &id, char *buffer, const ShaderLanguag
}
WRITE(p, "layout (location = 3) out highp float v_fogdepth;\n");
WRITE(p, "invariant gl_Position;\n");
WRITE(p, "invariant gl_Position;\n");
} else if (compat.shaderLanguage == HLSL_D3D11 || compat.shaderLanguage == HLSL_D3D9) {
// Note: These two share some code after this hellishly large if/else.
if (compat.shaderLanguage == HLSL_D3D11) {

View File

@@ -144,6 +144,9 @@ GPU_GLES::~GPU_GLES() {
shaderManagerGL_ = nullptr;
delete framebufferManagerGL_;
delete textureCacheGL_;
// Clear features so they're not visible in system info.
gstate_c.useFlags = 0;
}
// Take the raw GL extension and versioning data and turn into feature flags.

View File

@@ -367,3 +367,46 @@ void GPUStateCache::DoState(PointerWrap &p) {
Do(p, savedContextVersion);
}
}
static const char *const gpuUseFlagNames[32] = {
"GPU_USE_DUALSOURCE_BLEND",
"GPU_USE_LIGHT_UBERSHADER",
"GPU_USE_FRAGMENT_TEST_CACHE",
"GPU_USE_VS_RANGE_CULLING",
"GPU_USE_BLEND_MINMAX",
"GPU_USE_LOGIC_OP",
"GPU_USE_DEPTH_RANGE_HACK",
"GPU_USE_TEXTURE_NPOT",
"GPU_USE_ANISOTROPY",
"GPU_USE_CLEAR_RAM_HACK",
"GPU_USE_INSTANCE_RENDERING",
"GPU_USE_VERTEX_TEXTURE_FETCH",
"GPU_USE_TEXTURE_FLOAT",
"GPU_USE_16BIT_FORMATS",
"GPU_USE_DEPTH_CLAMP",
"GPU_USE_TEXTURE_LOD_CONTROL",
"GPU_USE_DEPTH_TEXTURE",
"GPU_USE_ACCURATE_DEPTH",
"GPU_USE_GS_CULLING",
"GPU_USE_REVERSE_COLOR_ORDER",
"GPU_USE_FRAMEBUFFER_FETCH",
"GPU_SCALE_DEPTH_FROM_24BIT_TO_16BIT",
"GPU_ROUND_FRAGMENT_DEPTH_TO_16BIT",
"GPU_ROUND_DEPTH_TO_16BIT",
"GPU_USE_CLIP_DISTANCE",
"GPU_USE_CULL_DISTANCE",
"N/A", // bit 26
"N/A", // bit 27
"N/A", // bit 28
"GPU_USE_VIRTUAL_REALITY",
"GPU_USE_SINGLE_PASS_STEREO",
"GPU_USE_SIMPLE_STEREO_PERSPECTIVE",
};
const char *GpuUseFlagToString(int useFlag) {
if ((u32)useFlag < 32) {
return gpuUseFlagNames[useFlag];
} else {
return "N/A";
}
}

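A natural companion to the new table, sketched here as a hypothetical helper (not part of this PR), would be collapsing the currently set flags into a single string for a log line or bug report:

// Hypothetical helper: formats all set use-flags as a comma-separated list.
// Assumes <string> is included; u32 comes from CommonTypes.h.
static std::string FormatUseFlags(u32 useFlags) {
	std::string result;
	for (int i = 0; i < 32; i++) {
		if (useFlags & (1u << i)) {
			if (!result.empty())
				result += ", ";
			result += GpuUseFlagToString(i);  // takes the bit index, not the mask
		}
	}
	return result;
}
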
View File

@@ -470,6 +470,7 @@ struct UVScale {
// location. Sometimes we need to take things into account in multiple places, it helps
// to centralize into flags like this. They're also fast to check since the cache line
// will be hot.
// NOTE: Do not forget to update the string array at the end of GPUState.cpp!
enum {
GPU_USE_DUALSOURCE_BLEND = FLAG_BIT(0),
GPU_USE_LIGHT_UBERSHADER = FLAG_BIT(1),
@@ -504,6 +505,9 @@ enum {
GPU_USE_SIMPLE_STEREO_PERSPECTIVE = FLAG_BIT(31),
};
// Note that this takes a flag index, not the bit value.
const char *GpuUseFlagToString(int useFlag);
struct KnownVertexBounds {
u16 minU;
u16 minV;

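To make the index-versus-mask distinction concrete (illustrative snippet, not part of the diff):

// Per gpuUseFlagNames[], GPU_USE_BLEND_MINMAX corresponds to bit 4, i.e. FLAG_BIT(4) == (1 << 4).
bool minMaxEnabled = gstate_c.Use(GPU_USE_BLEND_MINMAX);  // Use() expects the bit mask
const char *name = GpuUseFlagToString(4);                 // the string helper expects the index 4, not (1 << 4)
// Converting a mask to its index means counting trailing zeros, e.g. __builtin_ctz(GPU_USE_BLEND_MINMAX) on GCC/Clang.
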
View File

@@ -553,6 +553,7 @@ void SystemInfoScreen::CreateViews() {
deviceSpecs->Add(new InfoItem(si->T("High precision float range"), temp));
}
}
deviceSpecs->Add(new InfoItem(si->T("Depth buffer format"), DataFormatToString(draw->GetDeviceCaps().preferredDepthBufferFormat)));
deviceSpecs->Add(new ItemHeader(si->T("OS Information")));
deviceSpecs->Add(new InfoItem(si->T("Memory Page Size"), StringFromFormat(si->T("%d bytes"), GetMemoryProtectPageSize())));
deviceSpecs->Add(new InfoItem(si->T("RW/RX exclusive"), PlatformIsWXExclusive() ? di->T("Active") : di->T("Inactive")));
@@ -617,6 +618,19 @@ void SystemInfoScreen::CreateViews() {
deviceSpecs->Add(new InfoItem("Moga", moga));
#endif
if (gstate_c.useFlags != 0) {
// We're in-game, and can determine these.
// TODO: Call a static version of GPUCommon::CheckGPUFeatures() and derive them here directly.
deviceSpecs->Add(new ItemHeader(si->T("GPU Flags")));
for (int i = 0; i < 32; i++) {
if (gstate_c.Use((1 << i))) {
deviceSpecs->Add(new TextView(GpuUseFlagToString(i), new LayoutParams(FILL_PARENT, WRAP_CONTENT)))->SetFocusable(true);
}
}
}
ViewGroup *storageScroll = new ScrollView(ORIENT_VERTICAL, new LinearLayoutParams(FILL_PARENT, FILL_PARENT));
storageScroll->SetTag("DevSystemInfoBuildConfig");
LinearLayout *storage = new LinearLayout(ORIENT_VERTICAL);