Merge pull request #13769 from hrydgard/opengl-detect-cleanup
Minor cleanups in GPU feature detection, and restore a lost compat hack.
This commit is contained in commit 73da378efd.
@@ -17,7 +17,8 @@ int pngLoad(const char *file, int *pwidth, int *pheight, unsigned char **image_d
 
 	if (PNG_IMAGE_FAILED(png))
 	{
-		ERROR_LOG(IO, "pngLoad: %s", png.message);
+		WARN_LOG(IO, "pngLoad: %s (%s)", png.message, file);
+		*image_data_ptr = nullptr;
 		return 0;
 	}
 	*pwidth = png.width;
@@ -37,7 +38,8 @@ int pngLoadPtr(const unsigned char *input_ptr, size_t input_len, int *pwidth, in
 	png_image_begin_read_from_memory(&png, input_ptr, input_len);
 
 	if (PNG_IMAGE_FAILED(png)) {
-		ERROR_LOG(IO, "pngLoad: %s", png.message);
+		WARN_LOG(IO, "pngLoad: %s", png.message);
+		*image_data_ptr = nullptr;
 		return 0;
 	}
 	*pwidth = png.width;
@@ -49,6 +51,7 @@ int pngLoadPtr(const unsigned char *input_ptr, size_t input_len, int *pwidth, in
 	size_t size = PNG_IMAGE_SIZE(png);
 	if (!size) {
 		ERROR_LOG(IO, "pngLoad: empty image");
+		*image_data_ptr = nullptr;
 		return 0;
 	}
 
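Note: the hunks above touch PPSSPP's use of libpng's simplified read API. As a point of reference only, a minimal standalone decoder using that API looks roughly like the sketch below (the helper name and the RGBA format choice are illustrative, not taken from the PPSSPP sources); the detail the patch cares about is that the output pointer is cleared on every failure path.

    #include <png.h>
    #include <cstdlib>

    // Hypothetical helper, not PPSSPP code: decode a PNG from memory into an
    // RGBA8888 buffer allocated with malloc(). Returns 0 on failure, 1 on success.
    static int LoadPNGFromMemoryRGBA(const unsigned char *data, size_t len,
                                     int *w, int *h, unsigned char **out) {
    	*out = nullptr;  // clear the output on every path, as the patch above does

    	png_image png{};
    	png.version = PNG_IMAGE_VERSION;
    	if (!png_image_begin_read_from_memory(&png, data, len))
    		return 0;

    	png.format = PNG_FORMAT_RGBA;
    	size_t size = PNG_IMAGE_SIZE(png);
    	if (!size) {
    		png_image_free(&png);
    		return 0;
    	}

    	unsigned char *buffer = (unsigned char *)malloc(size);
    	if (!buffer || !png_image_finish_read(&png, nullptr, buffer, 0, nullptr)) {
    		free(buffer);
    		png_image_free(&png);  // safe to call even if libpng already cleaned up
    		return 0;
    	}

    	*w = (int)png.width;
    	*h = (int)png.height;
    	*out = buffer;
    	return 1;
    }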
@@ -275,6 +275,8 @@ D3D11DrawContext::D3D11DrawContext(ID3D11Device *device, ID3D11DeviceContext *de
 	case 0x163C:
 	case 0x8086:
 	case 0x8087: caps_.vendor = GPUVendor::VENDOR_INTEL; break;
+	// TODO: There are Windows ARM devices that could have Qualcomm here too.
+	// Not sure where I'll find the vendor codes for those though...
 	default:
 		caps_.vendor = GPUVendor::VENDOR_UNKNOWN;
 	}
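Note: the magic numbers in this switch are PCI vendor IDs as reported by DXGI. A sketch of how such an ID is read off the adapter, with illustrative names and only the commonly known IDs (this is not the PPSSPP code path, just context):

    #include <dxgi.h>

    // Sketch only: read the PCI vendor ID off a DXGI adapter and map it to a
    // coarse vendor category, as the switch above does.
    static const char *VendorFromAdapter(IDXGIAdapter *adapter) {
    	DXGI_ADAPTER_DESC desc{};
    	if (FAILED(adapter->GetDesc(&desc)))
    		return "unknown";
    	switch (desc.VendorId) {
    	case 0x10DE: return "NVIDIA";
    	case 0x1002: case 0x1022: return "AMD";
    	case 0x163C: case 0x8086: case 0x8087: return "Intel";
    	default: return "unknown";
    	}
    }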
@@ -138,14 +138,20 @@ void CheckGLExtensions() {
 	memset(&gl_extensions, 0, sizeof(gl_extensions));
 	gl_extensions.IsCoreContext = useCoreContext;
 
-#ifdef USING_GLES2
-	gl_extensions.IsGLES = !useCoreContext;
-#endif
 
 	const char *renderer = (const char *)glGetString(GL_RENDERER);
 	const char *versionStr = (const char *)glGetString(GL_VERSION);
 	const char *glslVersionStr = (const char *)glGetString(GL_SHADING_LANGUAGE_VERSION);
 
+#ifdef USING_GLES2
+	gl_extensions.IsGLES = !useCoreContext;
+#else
+	if (strstr(versionStr, "OpenGL ES") == versionStr) {
+		// For desktops running GLES.
+		gl_extensions.IsGLES = true;
+	}
+#endif
+
 	// Check vendor string to try and guess GPU
 	const char *cvendor = (char *)glGetString(GL_VENDOR);
 	// TODO: move this stuff to gpu_features.cpp
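Note: `strstr(versionStr, "OpenGL ES") == versionStr` is simply a prefix test: strstr returns a pointer to the first occurrence, so a match at the very start means the GL_VERSION string begins with "OpenGL ES", which is how a GLES context reports itself. A tiny self-contained illustration (helper name made up):

    #include <cstdio>
    #include <cstring>

    // Made-up helper: true if s starts with prefix; same idiom as the check above.
    static bool StartsWith(const char *s, const char *prefix) {
    	return strstr(s, prefix) == s;
    }

    int main() {
    	printf("%d\n", StartsWith("OpenGL ES 3.2 Mesa 21.0", "OpenGL ES"));  // 1
    	printf("%d\n", StartsWith("4.6.0 NVIDIA 455.38", "OpenGL ES"));      // 0
    	return 0;
    }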
@@ -175,6 +181,8 @@ void CheckGLExtensions() {
 		// Just for reference: Galaxy Y has renderer == "VideoCore IV HW"
 	} else if (vendor == "Vivante Corporation") {
 		gl_extensions.gpuVendor = GPU_VENDOR_VIVANTE;
+	} else if (vendor == "Apple") {
+		gl_extensions.gpuVendor = GPU_VENDOR_APPLE;
 	} else {
 		gl_extensions.gpuVendor = GPU_VENDOR_UNKNOWN;
 	}
@@ -218,12 +226,6 @@ void CheckGLExtensions() {
 		}
 	}
 
-#ifndef USING_GLES2
-	if (strstr(versionStr, "OpenGL ES") == versionStr) {
-		// For desktops running GLES.
-		gl_extensions.IsGLES = true;
-	}
-#endif
 
 	if (!gl_extensions.IsGLES) { // For desktop GL
 		gl_extensions.ver[0] = parsed[0];
@@ -6,6 +6,7 @@
 
 #include <string>
 
+// TODO: Replace with thin3d's vendor enum.
 enum {
 	GPU_VENDOR_NVIDIA = 1,
 	GPU_VENDOR_AMD = 2,
@@ -15,6 +16,7 @@ enum {
 	GPU_VENDOR_QUALCOMM = 6, // Adreno
 	GPU_VENDOR_BROADCOM = 7, // Raspberry PI etc
 	GPU_VENDOR_VIVANTE = 8,
+	GPU_VENDOR_APPLE = 9,
 	GPU_VENDOR_UNKNOWN = 0,
 };
 
@@ -43,7 +43,8 @@ void GLQueueRunner::CreateDeviceObjects() {
 	// An eternal optimist.
 	sawOutOfMemory_ = false;
 
-	// Populate some strings from the GL thread.
+	// Populate some strings from the GL thread so they can be queried from thin3d.
+	// TODO: Merge with GLFeatures.cpp/h
 	auto populate = [&](int name) {
 		const GLubyte *value = glGetString(name);
 		if (!value)
@@ -55,9 +56,6 @@ void GLQueueRunner::CreateDeviceObjects() {
 	populate(GL_RENDERER);
 	populate(GL_VERSION);
 	populate(GL_SHADING_LANGUAGE_VERSION);
-	if (!gl_extensions.IsCoreContext) { // Not OK to query this in core profile!
-		populate(GL_EXTENSIONS);
-	}
 	CHECK_GL_ERROR_IF_DEBUG();
 
 	useDebugGroups_ = !gl_extensions.IsGLES && gl_extensions.VersionGEThan(4, 3);
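Note: the guard removed here existed because glGetString(GL_EXTENSIONS) is invalid in a core profile; core contexts enumerate extensions index by index instead. That is not what this file does afterwards, but for reference the core-profile pattern is roughly the following (sketch, assuming a GLEW-style loader and a current GL 3.0+ context):

    #include <GL/glew.h>  // any GL 3.x loader works; GLEW is just for illustration
    #include <string>
    #include <vector>

    // Sketch only: the core-profile way to list extensions, one string per index.
    static std::vector<std::string> EnumerateExtensionsCore() {
    	GLint count = 0;
    	glGetIntegerv(GL_NUM_EXTENSIONS, &count);
    	std::vector<std::string> exts;
    	for (GLint i = 0; i < count; i++) {
    		const GLubyte *e = glGetStringi(GL_EXTENSIONS, (GLuint)i);
    		if (e)
    			exts.push_back(reinterpret_cast<const char *>(e));
    	}
    	return exts;
    }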
@@ -442,6 +442,7 @@ public:
 	case GPUVendor::VENDOR_ARM: return "VENDOR_ARM";
 	case GPUVendor::VENDOR_BROADCOM: return "VENDOR_BROADCOM";
 	case GPUVendor::VENDOR_VIVANTE: return "VENDOR_VIVANTE";
+	case GPUVendor::VENDOR_APPLE: return "VENDOR_APPLE";
 	case GPUVendor::VENDOR_UNKNOWN:
 	default:
 		return "VENDOR_UNKNOWN";
@@ -544,6 +545,7 @@ OpenGLContext::OpenGLContext() {
 	case GPU_VENDOR_INTEL: caps_.vendor = GPUVendor::VENDOR_INTEL; break;
 	case GPU_VENDOR_IMGTEC: caps_.vendor = GPUVendor::VENDOR_IMGTEC; break;
 	case GPU_VENDOR_VIVANTE: caps_.vendor = GPUVendor::VENDOR_VIVANTE; break;
+	case GPU_VENDOR_APPLE: caps_.vendor = GPUVendor::VENDOR_APPLE; break;
 	case GPU_VENDOR_UNKNOWN:
 	default:
 		caps_.vendor = GPUVendor::VENDOR_UNKNOWN;
@@ -620,30 +622,28 @@ OpenGLContext::OpenGLContext() {
 			}
 		}
 	} else {
-		if (gl_extensions.IsCoreContext) {
-			if (gl_extensions.VersionGEThan(3, 3, 0)) {
-				shaderLanguageDesc_.shaderLanguage = ShaderLanguage::GLSL_3xx;
-				shaderLanguageDesc_.fragColor0 = "fragColor0";
-				shaderLanguageDesc_.texture = "texture";
-				shaderLanguageDesc_.glslES30 = true;
-				shaderLanguageDesc_.bitwiseOps = true;
-				shaderLanguageDesc_.texelFetch = "texelFetch";
-				shaderLanguageDesc_.varying_vs = "out";
-				shaderLanguageDesc_.varying_fs = "in";
-				shaderLanguageDesc_.attribute = "in";
-			} else if (gl_extensions.VersionGEThan(3, 0, 0)) {
-				// Hm, I think this is wrong. This should be outside "if (gl_extensions.IsCoreContext)".
-				shaderLanguageDesc_.shaderLanguage = ShaderLanguage::GLSL_1xx;
-				shaderLanguageDesc_.fragColor0 = "fragColor0";
-				shaderLanguageDesc_.bitwiseOps = true;
-				shaderLanguageDesc_.texelFetch = "texelFetch";
-			} else {
-				// This too...
-				shaderLanguageDesc_.shaderLanguage = ShaderLanguage::GLSL_1xx;
-				if (gl_extensions.EXT_gpu_shader4) {
-					shaderLanguageDesc_.bitwiseOps = true;
-					shaderLanguageDesc_.texelFetch = "texelFetch2D";
-				}
-			}
-		}
+		// I don't know why we were checking for IsCoreContext here before.
+		if (gl_extensions.VersionGEThan(3, 3, 0)) {
+			shaderLanguageDesc_.shaderLanguage = ShaderLanguage::GLSL_3xx;
+			shaderLanguageDesc_.fragColor0 = "fragColor0";
+			shaderLanguageDesc_.texture = "texture";
+			shaderLanguageDesc_.glslES30 = true;
+			shaderLanguageDesc_.bitwiseOps = true;
+			shaderLanguageDesc_.texelFetch = "texelFetch";
+			shaderLanguageDesc_.varying_vs = "out";
+			shaderLanguageDesc_.varying_fs = "in";
+			shaderLanguageDesc_.attribute = "in";
+		} else if (gl_extensions.VersionGEThan(3, 0, 0)) {
+			shaderLanguageDesc_.shaderLanguage = ShaderLanguage::GLSL_1xx;
+			shaderLanguageDesc_.fragColor0 = "fragColor0";
+			shaderLanguageDesc_.bitwiseOps = true;
+			shaderLanguageDesc_.texelFetch = "texelFetch";
+		} else {
+			// This too...
+			shaderLanguageDesc_.shaderLanguage = ShaderLanguage::GLSL_1xx;
+			if (gl_extensions.EXT_gpu_shader4) {
+				shaderLanguageDesc_.bitwiseOps = true;
+				shaderLanguageDesc_.texelFetch = "texelFetch2D";
+			}
+		}
 	}
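Note: after this change the desktop-GL fallback picks the shader language from the reported GL version alone; the core/compat profile no longer matters. Distilled into a standalone helper (hypothetical name, reduced to just the language choice):

    enum class ShaderLang { GLSL_1xx, GLSL_3xx };

    // Mirrors the decision in the hunk above, minus the per-field setup:
    // the GL version alone drives the choice, the IsCoreContext check is gone.
    static ShaderLang PickDesktopShaderLanguage(int major, int minor) {
    	if (major > 3 || (major == 3 && minor >= 3))
    		return ShaderLang::GLSL_3xx;  // texelFetch, in/out varyings, explicit fragColor0
    	return ShaderLang::GLSL_1xx;      // older GLSL; bitwiseOps only with GL 3.0+ or EXT_gpu_shader4
    }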
@@ -222,6 +222,7 @@ enum class GPUVendor {
 	VENDOR_IMGTEC, // PowerVR
 	VENDOR_BROADCOM, // Raspberry
 	VENDOR_VIVANTE,
+	VENDOR_APPLE,
 };
 
 enum class NativeObject {
@@ -616,6 +616,8 @@ int TextureCacheCommon::GetBestCandidateIndex(const std::vector<AttachCandidate>
 	case FramebufferMatch::VALID:
 		relevancy += 1000;
 		break;
+	default:
+		break;
 	}
 
 	// Bonus point for matching stride.
@@ -791,6 +793,8 @@ void TextureCacheCommon::NotifyFramebuffer(VirtualFramebuffer *framebuffer, Fram
 		}
 		break;
 	}
+	default:
+		break;
 	}
 }
 
@@ -1058,7 +1062,7 @@ void TextureCacheCommon::NotifyConfigChanged() {
 		scaleFactor = g_Config.iTexScalingLevel;
 	}
 
-	if (!gstate_c.Supports(GPU_SUPPORTS_OES_TEXTURE_NPOT)) {
+	if (!gstate_c.Supports(GPU_SUPPORTS_TEXTURE_NPOT)) {
 		// Reduce the scale factor to a power of two (e.g. 2 or 4) if textures must be a power of two.
 		while ((scaleFactor & (scaleFactor - 1)) != 0) {
 			--scaleFactor;
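Note: `(scaleFactor & (scaleFactor - 1)) != 0` is the usual "more than one bit set" test, so the loop walks the scale factor down to the nearest power of two at or below it. A standalone illustration of the idiom (helper names made up):

    #include <cstdio>

    // True iff x is a power of two (x > 0 and exactly one bit set).
    static bool IsPowerOfTwo(int x) {
    	return x > 0 && (x & (x - 1)) == 0;
    }

    // Same behavior as the loop in the hunk above: step down to the
    // nearest power of two at or below n (n >= 1 assumed).
    static int ReduceToPowerOfTwo(int n) {
    	while (!IsPowerOfTwo(n))
    		--n;
    	return n;
    }

    int main() {
    	printf("%d %d %d\n", ReduceToPowerOfTwo(5), ReduceToPowerOfTwo(4), ReduceToPowerOfTwo(3));  // 4 4 2
    	return 0;
    }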
@@ -1102,6 +1102,13 @@ bool GenerateVertexShader(const VShaderID &id, char *buffer, const ShaderLanguag
 
 	// We've named the output gl_Position in HLSL as well.
 	WRITE(p, " %sgl_Position = outPos;\n", compat.vsOutPrefix);
+
+	if (gstate_c.Supports(GPU_NEEDS_Z_EQUAL_W_HACK)) {
+		// See comment in GPU_Vulkan.cpp.
+		WRITE(p, " if (%sgl_Position.z == %sgl_Position.w) %sgl_Position.z *= 0.999999;\n",
+			compat.vsOutPrefix, compat.vsOutPrefix, compat.vsOutPrefix);
+	}
+
 	if (compat.shaderLanguage == GLSL_VULKAN) {
 		WRITE(p, " gl_PointSize = 1.0;\n");
 	}
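Note: this is the compat hack referenced in the PR title. With an empty vsOutPrefix (plain GLSL output), the WRITE above emits a shader line equivalent to `if (gl_Position.z == gl_Position.w) gl_Position.z *= 0.999999;`, presumably to keep z/w from landing exactly on the clip boundary; see the referenced comment in GPU_Vulkan.cpp for the actual rationale. A trivial sketch of the format-string expansion (prefix value chosen for illustration):

    #include <cstdio>

    int main() {
    	// "" matches plain GLSL output; other backends pass a non-empty prefix.
    	const char *vsOutPrefix = "";
    	char line[256];
    	snprintf(line, sizeof(line),
    	         " if (%sgl_Position.z == %sgl_Position.w) %sgl_Position.z *= 0.999999;\n",
    	         vsOutPrefix, vsOutPrefix, vsOutPrefix);
    	fputs(line, stdout);  // -> " if (gl_Position.z == gl_Position.w) gl_Position.z *= 0.999999;"
    	return 0;
    }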
@@ -141,7 +141,7 @@ void GPU_D3D11::CheckGPUFeatures() {
 #endif
 
 	features |= GPU_SUPPORTS_DEPTH_TEXTURE;
-	features |= GPU_SUPPORTS_OES_TEXTURE_NPOT;
+	features |= GPU_SUPPORTS_TEXTURE_NPOT;
 	if (draw_->GetDeviceCaps().dualSourceBlend)
 		features |= GPU_SUPPORTS_DUALSOURCE_BLEND;
 	if (draw_->GetDeviceCaps().depthClampSupported)
@@ -204,7 +204,7 @@ void GPU_DX9::CheckGPUFeatures() {
 	if ((caps.RasterCaps & D3DPRASTERCAPS_ANISOTROPY) != 0 && caps.MaxAnisotropy > 1)
 		features |= GPU_SUPPORTS_ANISOTROPY;
 	if ((caps.TextureCaps & (D3DPTEXTURECAPS_NONPOW2CONDITIONAL | D3DPTEXTURECAPS_POW2)) == 0)
-		features |= GPU_SUPPORTS_OES_TEXTURE_NPOT;
+		features |= GPU_SUPPORTS_TEXTURE_NPOT;
 	}
 
 	if (!g_Config.bHighQualityDepth) {
@@ -672,7 +672,7 @@ bool DrawEngineGLES::IsCodePtrVertexDecoder(const u8 *ptr) const {
 
 bool DrawEngineGLES::SupportsHWTessellation() const {
 	bool hasTexelFetch = gl_extensions.GLES3 || (!gl_extensions.IsGLES && gl_extensions.VersionGEThan(3, 3, 0)) || gl_extensions.EXT_gpu_shader4;
-	return hasTexelFetch && gstate_c.SupportsAll(GPU_SUPPORTS_VERTEX_TEXTURE_FETCH | GPU_SUPPORTS_TEXTURE_FLOAT);
+	return hasTexelFetch && gstate_c.SupportsAll(GPU_SUPPORTS_VERTEX_TEXTURE_FETCH | GPU_SUPPORTS_TEXTURE_FLOAT | GPU_SUPPORTS_INSTANCE_RENDERING);
 }
 
 bool DrawEngineGLES::UpdateUseHWTessellation(bool enable) {
@@ -105,7 +105,7 @@ FramebufferManagerGLES::FramebufferManagerGLES(Draw::DrawContext *draw, GLRender
 	needGLESRebinds_ = true;
 	CreateDeviceObjects();
 	render_ = (GLRenderManager *)draw_->GetNativeObject(Draw::NativeObject::RENDER_MANAGER);
-	presentation_->SetLanguage(gl_extensions.IsCoreContext ? GLSL_3xx : GLSL_1xx);
+	presentation_->SetLanguage(draw_->GetShaderLanguageDesc().shaderLanguage);
 }
 
 void FramebufferManagerGLES::Init() {
@@ -185,15 +185,12 @@ void GPU_GLES::CheckGPUFeatures() {
 	if (gl_extensions.ARB_framebuffer_object || gl_extensions.NV_framebuffer_blit || gl_extensions.GLES3) {
 		features |= GPU_SUPPORTS_FRAMEBUFFER_BLIT | GPU_SUPPORTS_FRAMEBUFFER_BLIT_TO_DEPTH;
 	}
-	if (gl_extensions.ARB_vertex_array_object && gl_extensions.IsCoreContext) {
-		features |= GPU_SUPPORTS_VAO;
-	}
 
 	if ((gl_extensions.gpuVendor == GPU_VENDOR_NVIDIA) || (gl_extensions.gpuVendor == GPU_VENDOR_AMD))
 		features |= GPU_PREFER_REVERSE_COLOR_ORDER;
 
 	if (gl_extensions.OES_texture_npot)
-		features |= GPU_SUPPORTS_OES_TEXTURE_NPOT;
+		features |= GPU_SUPPORTS_TEXTURE_NPOT;
 
 	if (gl_extensions.EXT_blend_minmax)
 		features |= GPU_SUPPORTS_BLEND_MINMAX;
@@ -482,7 +482,7 @@ enum {
 	GPU_SUPPORTS_32BIT_INT_FSHADER = FLAG_BIT(15),
 	GPU_SUPPORTS_DEPTH_TEXTURE = FLAG_BIT(16),
 	GPU_SUPPORTS_ACCURATE_DEPTH = FLAG_BIT(17),
-	GPU_SUPPORTS_VAO = FLAG_BIT(18),
+	// Free bit: 18,
 	GPU_SUPPORTS_COPY_IMAGE = FLAG_BIT(19),
 	GPU_SUPPORTS_ANY_FRAMEBUFFER_FETCH = FLAG_BIT(20),
 	GPU_SCALE_DEPTH_FROM_24BIT_TO_16BIT = FLAG_BIT(21),
@@ -491,7 +491,7 @@ enum {
 	GPU_SUPPORTS_TEXTURE_LOD_CONTROL = FLAG_BIT(24),
 	GPU_SUPPORTS_FRAMEBUFFER_BLIT = FLAG_BIT(26),
 	GPU_SUPPORTS_FRAMEBUFFER_BLIT_TO_DEPTH = FLAG_BIT(27),
-	GPU_SUPPORTS_OES_TEXTURE_NPOT = FLAG_BIT(28),
+	GPU_SUPPORTS_TEXTURE_NPOT = FLAG_BIT(28),
 	GPU_NEEDS_Z_EQUAL_W_HACK = FLAG_BIT(29),
 	// Free bit: 30
 	GPU_PREFER_REVERSE_COLOR_ORDER = FLAG_BIT(31),
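Note: the feature set is a 32-bit mask, with FLAG_BIT presumably expanding to a single shifted bit, so renaming GPU_SUPPORTS_OES_TEXTURE_NPOT to GPU_SUPPORTS_TEXTURE_NPOT keeps the same bit and only changes the name. A generic sketch of the bitmask pattern (the macro shape, the helper, and all bit positions except 28 are assumptions, not the PPSSPP definitions):

    #include <cstdint>
    #include <cstdio>

    #define FLAG_BIT(n) (1u << (n))  // assumed shape of the macro: one bit per feature

    enum : uint32_t {
    	FEAT_TEXTURE_NPOT     = FLAG_BIT(28),  // bit number taken from the hunk above
    	FEAT_TEXTURE_FLOAT    = FLAG_BIT(3),   // hypothetical bit, for illustration only
    	FEAT_VERTEX_TEX_FETCH = FLAG_BIT(4),   // hypothetical bit, for illustration only
    };

    // A SupportsAll-style check reduces to masking: every requested bit must be set.
    static bool SupportsAll(uint32_t features, uint32_t mask) {
    	return (features & mask) == mask;
    }

    int main() {
    	uint32_t features = FEAT_TEXTURE_NPOT | FEAT_TEXTURE_FLOAT;
    	printf("%d\n", SupportsAll(features, FEAT_TEXTURE_NPOT | FEAT_TEXTURE_FLOAT));     // 1
    	printf("%d\n", SupportsAll(features, FEAT_TEXTURE_NPOT | FEAT_VERTEX_TEX_FETCH));  // 0
    	return 0;
    }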
@@ -72,7 +72,7 @@ SoftGPU::SoftGPU(GraphicsContext *gfxCtx, Draw::DrawContext *draw)
 
 	switch (GetGPUBackend()) {
 	case GPUBackend::OPENGL:
-		presentation_->SetLanguage(gl_extensions.IsCoreContext ? GLSL_3xx : GLSL_1xx);
+		presentation_->SetLanguage(draw_->GetShaderLanguageDesc().shaderLanguage);
 		break;
 	case GPUBackend::DIRECT3D9:
 		ShaderTranslationInit();
@@ -227,7 +227,7 @@ void GPU_Vulkan::CheckGPUFeatures() {
 	features |= GPU_SUPPORTS_FRAMEBUFFER_BLIT;
 	features |= GPU_SUPPORTS_BLEND_MINMAX;
 	features |= GPU_SUPPORTS_COPY_IMAGE;
-	features |= GPU_SUPPORTS_OES_TEXTURE_NPOT;
+	features |= GPU_SUPPORTS_TEXTURE_NPOT;
 	features |= GPU_SUPPORTS_INSTANCE_RENDERING;
 	features |= GPU_SUPPORTS_VERTEX_TEXTURE_FETCH;
 	features |= GPU_SUPPORTS_TEXTURE_FLOAT;
@@ -533,17 +533,19 @@ void MainUI::initializeGL() {
 		g_Config.iGPUBackend = (int)GPUBackend::OPENGL;
 	}
 
-	SetGLCoreContext(format().profile() == QGLFormat::CoreProfile);
+	bool useCoreContext = format().profile() == QGLFormat::CoreProfile;
+
+	SetGLCoreContext(useCoreContext);
 
 #ifndef USING_GLES2
 	// Some core profile drivers elide certain extensions from GL_EXTENSIONS/etc.
 	// glewExperimental allows us to force GLEW to search for the pointers anyway.
-	if (gl_extensions.IsCoreContext) {
+	if (useCoreContext) {
 		glewExperimental = true;
 	}
 	glewInit();
 	// Unfortunately, glew will generate an invalid enum error, ignore.
-	if (gl_extensions.IsCoreContext) {
+	if (useCoreContext) {
 		glGetError();
 	}
 #endif
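Note: the Qt path now caches the core-profile flag in a local instead of reading gl_extensions.IsCoreContext, presumably because gl_extensions has not been populated yet at this point. The surrounding GLEW calls are the standard dance for core profiles; in isolation it is roughly the following (sketch, assuming GLEW and a current core-profile context, not the PPSSPP code):

    #include <GL/glew.h>

    static bool InitGLEWForContext(bool coreProfile) {
    	if (coreProfile)
    		glewExperimental = GL_TRUE;  // let GLEW fetch pointers even where GL_EXTENSIONS is restricted
    	if (glewInit() != GLEW_OK)
    		return false;
    	if (coreProfile)
    		glGetError();  // swallow the spurious GL_INVALID_ENUM glewInit can raise in core profiles
    	return true;
    }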