Mirror of https://github.com/libretro/ppsspp.git (synced 2024-11-27 10:20:49 +00:00)

Commit 842290b6dd (parent 42a73e4c76)

Workaround for bad int behaviour on Adreno / GLES (no problems in Vulkan). See #11480, should help #11479.
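For context on what "bad int behaviour" means here: GLES drivers advertise shader precision through glGetShaderPrecisionFormat(), which reports range[] as log2 of the smallest and largest representable magnitudes, so a fragment highp int that can't cover ~2^31 will silently truncate the integer depal math. A minimal standalone sketch of that query (the helper name and structure are mine, not PPSSPP's; the >= 30 threshold just mirrors the diff below):

#include <GLES2/gl2.h>

// Ask the driver how wide fragment-shader highp int really is.
// glGetShaderPrecisionFormat() returns range[] as log2 of the min/max
// representable magnitudes: a true 32-bit int reports {31, 30}, since
// floor(log2(2^31 - 1)) == 30. Hence the ">= 30" test in this commit.
static bool FragmentHighpIntLooks32Bit() {
	GLint range[2] = {};
	GLint precision = 0;  // log2 of the precision; 0 for integer formats
	glGetShaderPrecisionFormat(GL_FRAGMENT_SHADER, GL_HIGH_INT, range, &precision);
	return range[1] >= 30;
}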
@@ -53,6 +53,10 @@ DepalShaderCacheGLES::DepalShaderCacheGLES(Draw::DrawContext *draw) {
 	render_ = (GLRenderManager *)draw->GetNativeObject(Draw::NativeObject::RENDER_MANAGER);
 	// Pre-build the vertex program
 	useGL3_ = gl_extensions.GLES3 || gl_extensions.VersionGEThan(3, 3);
+	if (!gstate_c.Supports(GPU_SUPPORTS_32BIT_INT_FSHADER)) {
+		// Use the floating point path, it just can't handle the math.
+		useGL3_ = false;
+	}

 	vertexShaderFailed_ = false;
 	vertexShader_ = 0;
@@ -190,11 +190,18 @@ void GPU_GLES::CheckGPUFeatures() {
 	}

 	if (gl_extensions.IsGLES) {
-		if (gl_extensions.GLES3)
+		if (gl_extensions.GLES3) {
 			features |= GPU_SUPPORTS_GLSL_ES_300;
+			// Mali reports 30 but works fine...
+			if (gl_extensions.range[1][5][1] >= 30) {
+				features |= GPU_SUPPORTS_32BIT_INT_FSHADER;
+			}
+		}
 	} else {
-		if (gl_extensions.VersionGEThan(3, 3, 0))
+		if (gl_extensions.VersionGEThan(3, 3, 0)) {
 			features |= GPU_SUPPORTS_GLSL_330;
+			features |= GPU_SUPPORTS_32BIT_INT_FSHADER;
+		}
 	}

 	if (gl_extensions.EXT_shader_framebuffer_fetch || gl_extensions.NV_shader_framebuffer_fetch || gl_extensions.ARM_shader_framebuffer_fetch) {
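Note the asymmetry in the hunk above: the desktop GL branch sets GPU_SUPPORTS_32BIT_INT_FSHADER unconditionally once 3.3 is available, because desktop GLSL ints are always full 32-bit, while a GLES 3 context only earns the flag when the driver's reported fragment highp int range passes the >= 30 test. The "Mali reports 30" comment reflects that 30 is exactly what an honest 32-bit implementation reports for the max (floor(log2(2^31 - 1)) == 30).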
@@ -444,7 +444,9 @@ void TextureCacheGLES::ApplyTextureFramebuffer(TexCacheEntry *entry, VirtualFram
 	uint32_t clutMode = gstate.clutformat & 0xFFFFFF;

 	bool useShaderDepal = framebufferManager_->GetCurrentRenderVFB() != framebuffer && gstate_c.Supports(GPU_SUPPORTS_GLSL_ES_300);
-
+	if (!gstate_c.Supports(GPU_SUPPORTS_32BIT_INT_FSHADER)) {
+		useShaderDepal = false;
+	}
 	if ((entry->status & TexCacheEntry::STATUS_DEPALETTIZE) && !g_Config.bDisableSlowFramebufEffects) {
 		if (useShaderDepal) {
 			const GEPaletteFormat clutFormat = gstate.getClutPaletteFormat();
@@ -466,7 +466,6 @@ enum {
 	GPU_SUPPORTS_DUALSOURCE_BLEND = FLAG_BIT(0),
 	GPU_SUPPORTS_GLSL_ES_300 = FLAG_BIT(1),
 	GPU_SUPPORTS_GLSL_330 = FLAG_BIT(2),
 	GPU_SUPPORTS_UNPACK_SUBIMAGE = FLAG_BIT(3),
 	GPU_SUPPORTS_BLEND_MINMAX = FLAG_BIT(4),
 	GPU_SUPPORTS_LOGIC_OP = FLAG_BIT(5),
 	GPU_USE_DEPTH_RANGE_HACK = FLAG_BIT(6),
@@ -478,6 +477,7 @@ enum {
 	GPU_SUPPORTS_TEXTURE_FLOAT = FLAG_BIT(12),
 	GPU_SUPPORTS_16BIT_FORMATS = FLAG_BIT(13),
 	GPU_SUPPORTS_DEPTH_CLAMP = FLAG_BIT(14),
+	GPU_SUPPORTS_32BIT_INT_FSHADER = FLAG_BIT(15),
 	GPU_SUPPORTS_LARGE_VIEWPORTS = FLAG_BIT(16),
 	GPU_SUPPORTS_ACCURATE_DEPTH = FLAG_BIT(17),
 	GPU_SUPPORTS_VAO = FLAG_BIT(18),
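These feature flags are a plain bitmask consumed through gstate_c.Supports() in the hunks above. A hedged sketch of that machinery, assuming the usual FLAG_BIT/Supports shape (the real definitions live elsewhere in GPUState.h; this is illustrative, not copied from PPSSPP):

#include <cstdint>

// Assumed shape of the flag machinery - each feature is one bit, and
// Supports() is a single AND against the accumulated feature mask.
#define FLAG_BIT(x) (1 << (x))

struct FeatureFlags {
	uint32_t features = 0;
	bool Supports(uint32_t flag) const { return (features & flag) != 0; }
};

With that shape, the new gating reads as a single bit test: if GPU_SUPPORTS_32BIT_INT_FSHADER was never set, every caller drops to the float path.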
@@ -405,6 +405,13 @@ void SystemInfoScreen::CreateViews() {
 #endif
 	if (GetGPUBackend() == GPUBackend::OPENGL) {
 		deviceSpecs->Add(new InfoItem(si->T("Core Context"), gl_extensions.IsCoreContext ? di->T("Active") : di->T("Inactive")));
+		int highp_int_min = gl_extensions.range[1][5][0];
+		int highp_int_max = gl_extensions.range[1][5][1];
+		if (highp_int_max != 0) {
+			char highp_int_range[512];
+			snprintf(highp_int_range, sizeof(highp_int_range), "Highp int range: %d-%d", highp_int_min, highp_int_max);
+			deviceSpecs->Add(new InfoItem(si->T("High precision int range"), highp_int_range));
+		}
 	}
 	deviceSpecs->Add(new ItemHeader(si->T("OS Information")));
 	deviceSpecs->Add(new InfoItem(si->T("Memory Page Size"), StringFromFormat(si->T("%d bytes"), GetMemoryProtectPageSize())));
@@ -6,7 +6,7 @@
 // Utility to be able to liberally sprinkle GL error checks around your code
 // and easily disable them all in release builds - just undefine DEBUG_OPENGL.

-// #define DEBUG_OPENGL
+#define DEBUG_OPENGL

 #if defined(DEBUG_OPENGL)
@@ -463,6 +463,13 @@ void CheckGLExtensions() {
 			glGetShaderPrecisionFormat(shaderTypes[st], precisions[p], gl_extensions.range[st][p], &gl_extensions.precision[st][p]);
 		}
 	}
+
+	// Now, Adreno lies. So let's override it.
+	if (gl_extensions.gpuVendor == GPU_VENDOR_QUALCOMM) {
+		WLOG("Detected Adreno - lowering int precision");
+		gl_extensions.range[1][5][0] = 15;
+		gl_extensions.range[1][5][1] = 15;
+	}
 }
 #endif

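The forced value of 15 is what ties the commit together: a claimed range of ~2^15 can never pass the >= 30 check in CheckGPUFeatures(), so GPU_SUPPORTS_32BIT_INT_FSHADER stays unset on Qualcomm hardware, and both DepalShaderCacheGLES and ApplyTextureFramebuffer above fall back to the float path no matter what the Adreno driver advertises. The [1][5] indices select the fragment-stage GL_HIGH_INT slot of the shaderTypes/precisions arrays iterated just above - the same slot the SystemInfoScreen hunk labels "Highp int range".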
@@ -776,6 +776,7 @@ void GLQueueRunner::PerformRenderPass(const GLRStep &step) {
 	}
 	case GLRRenderCommand::UNIFORM4F:
 	{
+		CHECK_GL_ERROR_IF_DEBUG();
 		int loc = c.uniform4.loc ? *c.uniform4.loc : -1;
 		if (c.uniform4.name) {
 			loc = curProgram->GetUniformLoc(c.uniform4.name);
@@ -640,6 +640,7 @@ public:
 		data.uniform4.loc = loc;
 		data.uniform4.count = count;
 		memcpy(data.uniform4.v, udata, sizeof(float) * count);

 		curRenderStep_->commands.push_back(data);
 	}