Mirror of https://github.com/libretro/ppsspp.git
Switch to old-style depth in OpenGL.
Some devices really aren't handling the new method well. There seem to be accuracy problems when specifying the values to OpenGL.
commit f1c06d25ea
parent 87b75235e7
```diff
@@ -511,6 +511,10 @@ float DepthSliceFactor() {
 // This is used for float values which might not be integers, but are in the integer scale of 65535.
 static float ToScaledDepthFromInteger(float z) {
+	if (!gstate_c.Supports(GPU_SUPPORTS_ACCURATE_DEPTH)) {
+		return z * (1.0f / 65535.0f);
+	}
+
 	const float depthSliceFactor = DepthSliceFactor();
 	if (gstate_c.Supports(GPU_SCALE_DEPTH_FROM_24BIT_TO_16BIT)) {
 		const double doffset = 0.5 * (depthSliceFactor - 1.0) * (1.0 / depthSliceFactor);
```
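The early return above is the entire old-style path: a PSP depth value in [0, 65535] maps linearly onto OpenGL's [0, 1]. The accurate-depth path instead compresses the range into a slice of [0, 1] via DepthSliceFactor(). A standalone sketch for comparing the two, with Supports() and DepthSliceFactor() stubbed out; the tail of the accurate-depth formula is an assumed completion, since the hunk is cut off:

```cpp
#include <cstdio>

static bool g_accurateDepth = false;               // stub for gstate_c.Supports(...)
static float DepthSliceFactor() { return 4.0f; }   // example value, assumed

static float ToScaledDepthFromInteger(float z) {
	if (!g_accurateDepth) {
		return z * (1.0f / 65535.0f);              // old style: uses the full [0, 1] range
	}
	const float f = DepthSliceFactor();
	const float doffset = 0.5f * (f - 1.0f) * (1.0f / f);
	return z * (1.0f / (65535.0f * f)) + doffset;  // assumed completion of the truncated branch
}

int main() {
	// Old style: 0 -> 0.0, 65535 -> 1.0.
	// Accurate (f = 4): 0 -> 0.375, 65535 -> 0.625, i.e. the PSP depth range is
	// squeezed into the middle quarter of [0, 1] so out-of-range depths survive.
	printf("%f %f\n", ToScaledDepthFromInteger(0.0f), ToScaledDepthFromInteger(65535.0f));
	return 0;
}
```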
```diff
@@ -690,8 +694,19 @@ void ConvertViewportAndScissor(bool useBufferedRendering, float renderWidth, flo
 	// This adjusts the center from halfActualZRange to vpZCenter.
 	float zOffset = halfActualZRange < std::numeric_limits<float>::epsilon() ? 0.0f : (vpZCenter - (minz + halfActualZRange)) / halfActualZRange;
 
-	out.depthRangeMin = ToScaledDepthFromInteger(minz);
-	out.depthRangeMax = ToScaledDepthFromInteger(maxz);
+	if (!gstate_c.Supports(GPU_SUPPORTS_ACCURATE_DEPTH)) {
+		zScale = 1.0f;
+		zOffset = 0.0f;
+		out.depthRangeMin = ToScaledDepthFromInteger(vpZCenter - vpZScale);
+		out.depthRangeMax = ToScaledDepthFromInteger(vpZCenter + vpZScale);
+	} else {
+		out.depthRangeMin = ToScaledDepthFromInteger(minz);
+		out.depthRangeMax = ToScaledDepthFromInteger(maxz);
+	}
+
+	// OpenGL will clamp these for us anyway, and Direct3D will error if not clamped.
+	out.depthRangeMin = std::max(out.depthRangeMin, 0.0f);
+	out.depthRangeMax = std::min(out.depthRangeMax, 1.0f);
 
 	bool scaleChanged = gstate_c.vpWidthScale != wScale || gstate_c.vpHeightScale != hScale;
 	bool offsetChanged = gstate_c.vpXOffset != xOffset || gstate_c.vpYOffset != yOffset;
```
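In the old-style branch the depth range handed to the API comes straight from the game's viewport rather than from the remapped minz/maxz, and zScale/zOffset are neutralized. A hypothetical walk-through with a viewport covering the full PSP depth range (values invented for illustration; the result presumably feeds a glDepthRangef-style call):

```cpp
#include <algorithm>
#include <cstdio>

int main() {
	// Invented example viewport: centered at 32767.5, spanning the whole 16-bit range.
	const float vpZCenter = 32767.5f, vpZScale = 32767.5f;
	auto toScaled = [](float z) { return z * (1.0f / 65535.0f); };  // old-style mapping

	float depthRangeMin = std::max(toScaled(vpZCenter - vpZScale), 0.0f);
	float depthRangeMax = std::min(toScaled(vpZCenter + vpZScale), 1.0f);

	// Prints 0.000000 and 1.000000: the driver gets plain endpoint values it can
	// represent exactly, instead of a narrow slice of [0, 1].
	printf("%f %f\n", depthRangeMin, depthRangeMax);
	return 0;
}
```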
```diff
@@ -477,6 +477,7 @@ void DIRECTX9_GPU::CheckGPUFeatures() {
 	features |= GPU_SUPPORTS_BLEND_MINMAX;
 	features |= GPU_SUPPORTS_TEXTURE_LOD_CONTROL;
 	features |= GPU_PREFER_CPU_DOWNLOAD;
+	features |= GPU_SUPPORTS_ACCURATE_DEPTH;
 
 	if (!g_Config.bHighQualityDepth) {
 		features |= GPU_SCALE_DEPTH_FROM_24BIT_TO_16BIT;
```
```diff
@@ -679,12 +679,16 @@ bool GenerateFragmentShader(const ShaderID &id, char *buffer) {
 		const double scale = DepthSliceFactor() * 65535.0;
 
 		WRITE(p, " highp float z = gl_FragCoord.z;\n");
-		// We center the depth with an offset, but only its fraction matters.
-		// When (DepthSliceFactor() - 1) is odd, it will be 0.5, otherwise 0.
-		if (((int)(DepthSliceFactor() - 1.0f) & 1) == 1) {
-			WRITE(p, " z = (floor((z * %f) - (1.0 / 2.0)) + (1.0 / 2.0)) * (1.0 / %f);\n", scale, scale);
+		if (gstate_c.Supports(GPU_SUPPORTS_ACCURATE_DEPTH)) {
+			// We center the depth with an offset, but only its fraction matters.
+			// When (DepthSliceFactor() - 1) is odd, it will be 0.5, otherwise 0.
+			if (((int)(DepthSliceFactor() - 1.0f) & 1) == 1) {
+				WRITE(p, " z = (floor((z * %f) - (1.0 / 2.0)) + (1.0 / 2.0)) * (1.0 / %f);\n", scale, scale);
+			} else {
+				WRITE(p, " z = floor(z * %f) * (1.0 / %f);\n", scale, scale);
+			}
 		} else {
-			WRITE(p, " z = floor(z * %f) * (1.0 / %f);\n", scale, scale);
+			WRITE(p, " z = (1.0/65535.0) * floor(z * 65535.0);\n");
 		}
 		WRITE(p, " gl_FragDepth = z;\n");
 	}
```
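For devices without the accurate-depth flag, the WRITE calls above generate a fragment-shader snippet equivalent to the following, reassembled here from the literal strings in the diff. It simply quantizes gl_FragCoord.z to 16-bit steps, which keeps depth values consistent with the emulated 16-bit PSP depth buffer even under the simpler range mapping:

```cpp
// What the old-style branch emits, reassembled from the WRITE calls above.
const char *oldStyleDepthGLSL =
	"highp float z = gl_FragCoord.z;\n"
	"z = (1.0/65535.0) * floor(z * 65535.0);\n"
	"gl_FragDepth = z;\n";
```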
```diff
@@ -598,8 +598,13 @@ void LinkedShader::UpdateUniforms(u32 vertType, const ShaderID &vsid) {
 		float minz = -((gstate_c.vpZOffset * halfActualZRange) - vpZCenter) - halfActualZRange;
 		float viewZScale = halfActualZRange;
 		float viewZCenter = minz + halfActualZRange;
-		float viewZInvScale;
 
+		if (!gstate_c.Supports(GPU_SUPPORTS_ACCURATE_DEPTH)) {
+			viewZScale = vpZScale;
+			viewZCenter = vpZCenter;
+		}
+
+		float viewZInvScale;
 		if (viewZScale != 0.0) {
 			viewZInvScale = 1.0f / viewZScale;
 		} else {
```
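In old-style mode the uniforms sent to the shader carry the raw viewport depth scale and center, so no un-remapping happens on the GPU side. A condensed standalone sketch of the logic above; the else branch of the zero guard is cut off in the hunk, so the fallback value here is an assumption:

```cpp
// Hypothetical standalone condensation of the uniform setup above.
float ComputeViewZInvScale(bool accurateDepth, float halfActualZRange, float minz,
                           float vpZScale, float vpZCenter) {
	float viewZScale = halfActualZRange;
	float viewZCenter = minz + halfActualZRange;
	if (!accurateDepth) {
		viewZScale = vpZScale;   // old style: pass the viewport values straight through
		viewZCenter = vpZCenter;
	}
	(void)viewZCenter;           // the real code also uploads the center as a uniform
	// Guard the reciprocal; the truncated else branch presumably picks a safe value.
	return viewZScale != 0.0f ? 1.0f / viewZScale : 0.0f;
}
```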
```diff
@@ -455,6 +455,7 @@ enum {
 	GPU_SUPPORTS_UNPACK_SUBIMAGE = FLAG_BIT(3),
 	GPU_SUPPORTS_BLEND_MINMAX = FLAG_BIT(4),
 	GPU_SUPPORTS_LOGIC_OP = FLAG_BIT(5),
+	GPU_SUPPORTS_ACCURATE_DEPTH = FLAG_BIT(17),
 	GPU_SUPPORTS_VAO = FLAG_BIT(18),
 	GPU_SUPPORTS_ANY_COPY_IMAGE = FLAG_BIT(19),
 	GPU_SUPPORTS_ANY_FRAMEBUFFER_FETCH = FLAG_BIT(20),
```
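The new flag lives in the same bitmask the rest of the feature checks consult. A minimal sketch of how such a mask is typically defined and tested; the FLAG_BIT macro and the Supports() body are assumptions that merely match how the flag is used throughout the diff:

```cpp
#include <cstdint>

#define FLAG_BIT(x) (1u << (x))

enum : uint32_t {
	GPU_SUPPORTS_ACCURATE_DEPTH = FLAG_BIT(17),
};

struct GPUStateCache {
	uint32_t featureFlags = 0;
	bool Supports(uint32_t flags) const { return (featureFlags & flags) != 0; }
};

// Usage mirroring the diff: the D3D9 backend sets the bit, GL now leaves it clear,
// e.g.  gstate_c.featureFlags |= GPU_SUPPORTS_ACCURATE_DEPTH;
```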