Remove/deprecate GPU tick sync

commit 9e35144b96
parent 8c69be9bfa
Author: Henrik Rydgård
Date:   2017-11-05 23:07:37 +01:00

5 changed files with 4 additions and 32 deletions


@@ -57,10 +57,6 @@ static int geSyncEvent;
 static int geInterruptEvent;
 static int geCycleEvent;
 
-// Let's try updating 10 times per vblank - this is the interval for geCycleEvent.
-const int geIntervalUs = 1000000 / (60 * 10);
-const int geBehindThresholdUs = 1000000 / (60 * 10);
-
 class GeIntrHandler : public IntrHandler {
 public:
     GeIntrHandler() : IntrHandler(PSP_GE_INTR) {}
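
For reference, the two constants deleted above encode the old polling rate: ten sync checks per 60 Hz vblank, i.e. one check every 1666 microseconds. A minimal standalone sketch of that arithmetic (not PPSSPP source; the names are stand-ins):

// Standalone sketch (not PPSSPP source) of the arithmetic behind the two
// deleted constants: ten sync checks per 60 Hz vblank.
#include <cstdio>

int main() {
    const int vblanksPerSecond = 60;   // PSP display refresh rate
    const int checksPerVblank = 10;    // "updating 10 times per vblank"
    const int geIntervalUs = 1000000 / (vblanksPerSecond * checksPerVblank);
    // Integer division: 1000000 / 600 = 1666 microseconds between checks.
    printf("geIntervalUs = %d\n", geIntervalUs);
    return 0;
}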
@@ -193,19 +189,8 @@ static void __GeExecuteInterrupt(u64 userdata, int cyclesLate) {
     __TriggerInterrupt(PSP_INTR_IMMEDIATE, PSP_GE_INTR, PSP_INTR_SUB_NONE);
 }
 
-// Should we still do this?
 static void __GeCheckCycles(u64 userdata, int cyclesLate) {
-    u64 geTicks = gpu->GetTickEstimate();
-    if (geTicks != 0) {
-        if (CoreTiming::GetTicks() > geTicks + usToCycles(geBehindThresholdUs)) {
-            u64 diff = CoreTiming::GetTicks() - geTicks;
-            CoreTiming::Advance();
-        }
-    }
-
-    // This may get out of step if we synced (because we don't correct for cyclesLate),
-    // but that's okay - __GeCheckCycles is a very rough way to synchronize anyway.
-    CoreTiming::ScheduleEvent(usToCycles(geIntervalUs), geCycleEvent, 0);
+    // Deprecated
 }
 
 void __GeInit() {
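
The function body is reduced to "// Deprecated" rather than deleting __GeCheckCycles outright, presumably because geCycleEvent is still registered with CoreTiming in the next hunk (old save states may still reference the event). For reference, below is a minimal standalone sketch of the self-rescheduling pattern the deleted body used; the names are simplified stand-ins, not PPSSPP's CoreTiming API. Because the handler re-arms relative to the possibly-late current time without subtracting cyclesLate, each late firing delays all later ones, the drift the deleted comment calls acceptable for such a rough synchronization mechanism.

// Standalone sketch of the self-rescheduling pattern __GeCheckCycles relied
// on (simplified stand-ins, not PPSSPP's CoreTiming API).
#include <cstdint>
#include <cstdio>

static const int64_t kIntervalTicks = 1666;   // stand-in for usToCycles(geIntervalUs)
static int64_t g_now = 0;                     // simulated CoreTiming::GetTicks()
static int64_t g_nextEvent = kIntervalTicks;  // next scheduled firing

static void CheckCyclesHandler(int64_t cyclesLate) {
    printf("fired at %lld, %lld cycles late\n", (long long)g_now, (long long)cyclesLate);
    // Re-arm relative to "now" without correcting for lateness -- this is
    // where the period drifts whenever the handler fires late.
    g_nextEvent = g_now + kIntervalTicks;
}

int main() {
    // Simulate three firings; the second one runs 100 ticks late, which
    // pushes every subsequent firing back by 100 ticks.
    const int64_t lateness[] = {0, 100, 0};
    for (int64_t late : lateness) {
        g_now = g_nextEvent + late;  // scheduler runs the event 'late' ticks late
        CheckCyclesHandler(late);
    }
    return 0;
}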
@@ -216,15 +201,12 @@ void __GeInit() {
     geSyncEvent = CoreTiming::RegisterEvent("GeSyncEvent", &__GeExecuteSync);
     geInterruptEvent = CoreTiming::RegisterEvent("GeInterruptEvent", &__GeExecuteInterrupt);
+    // Deprecated
     geCycleEvent = CoreTiming::RegisterEvent("GeCycleEvent", &__GeCheckCycles);
 
     listWaitingThreads.clear();
     drawWaitingThreads.clear();
 
-    // When we're using separate CPU/GPU threads, we need to keep them in sync.
-    if (IsOnSeparateCPUThread()) {
-        CoreTiming::ScheduleEvent(usToCycles(geIntervalUs), geCycleEvent, 0);
-    }
 }
 
 struct GeInterruptData_v1 {


@@ -400,6 +400,7 @@ void SoftwareTransform(
     // rectangle out of many. Quite a small optimization though.
     // Experiment: Disable on PowerVR (see issue #6290)
     // TODO: This bleeds outside the play area in non-buffered mode. Big deal? Probably not.
+    // TODO: Allow creating a depth clear and a color draw.
     bool reallyAClear = false;
     if (maxIndex > 1 && prim == GE_PRIM_RECTANGLES && gstate.isModeClear()) {
         int scissorX2 = gstate.getScissorX2() + 1;
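
For context, the reallyAClear path detects when a clear-mode GE_PRIM_RECTANGLES draw can be treated as a full clear of the scissor region; note that getScissorX2() + 1 converts the GE's inclusive scissor register into an exclusive bound. A loose standalone sketch of the coverage idea (simplified stand-ins, not PPSSPP's actual test, which also inspects the transformed vertices):

// Standalone sketch: a clear-mode rectangle draw only counts as a real clear
// when its bounds cover the whole scissor region (all bounds exclusive here).
#include <cstdio>

struct Rect { int x1, y1, x2, y2; };  // x2/y2 exclusive

static bool CoversScissor(const Rect &draw, const Rect &scissor) {
    return draw.x1 <= scissor.x1 && draw.y1 <= scissor.y1 &&
           draw.x2 >= scissor.x2 && draw.y2 >= scissor.y2;
}

int main() {
    const Rect scissor = {0, 0, 480, 272};  // full PSP render target
    const Rect full    = {0, 0, 480, 272};
    const Rect partial = {0, 0, 480, 136};  // top half only
    printf("full rect covers scissor: %d\n", CoversScissor(full, scissor));      // 1
    printf("partial rect covers scissor: %d\n", CoversScissor(partial, scissor)); // 0
    return 0;
}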


@@ -395,7 +395,6 @@ void GPUCommon::Reinitialize() {
     busyTicks = 0;
     timeSpentStepping_ = 0.0;
     interruptsEnabled_ = true;
-    curTickEst_ = 0;
 }
 
 int GPUCommon::EstimatePerVertexCost() {
@@ -1023,7 +1022,6 @@ int GPUCommon::GetNextListIndex() {
 void GPUCommon::ProcessDLQueue() {
     startingTicks = CoreTiming::GetTicks();
     cyclesExecuted = 0;
-    curTickEst_ = std::max(busyTicks, startingTicks + cyclesExecuted);
 
     // Seems to be correct behaviour to process the list anyway?
     if (startingTicks < busyTicks) {
@@ -1042,7 +1040,6 @@ void GPUCommon::ProcessDLQueue() {
             // At the end, we can remove it from the queue and continue.
             dlQueue.erase(std::remove(dlQueue.begin(), dlQueue.end(), listIndex), dlQueue.end());
         }
-        curTickEst_ = std::max(busyTicks, startingTicks + cyclesExecuted);
     }
 }
@@ -1052,7 +1049,6 @@ void GPUCommon::ProcessDLQueue() {
     busyTicks = std::max(busyTicks, drawCompleteTicks);
     __GeTriggerSync(GPU_SYNC_DRAW, 1, drawCompleteTicks);
     // Since the event is in CoreTiming, we're in sync. Just set 0 now.
-    curTickEst_ = 0;
 }
 
 void GPUCommon::PreExecuteOp(u32 op, u32 diff) {
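
These three hunks delete the curTickEst_ bookkeeping itself. For reference, a standalone sketch of what it computed (example values invented for illustration): while a display list was running, the GE's current tick was estimated as the later of "busy until" and "start time plus cycles executed so far", and reset to 0 once the work was handed back to CoreTiming.

// Standalone sketch of the deleted tick-estimate bookkeeping.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    uint64_t busyTicks = 5000;       // GE known to be busy until this tick
    uint64_t startingTicks = 4000;   // tick when list processing began
    uint64_t cyclesExecuted = 1500;  // GE cycles consumed by the list so far

    uint64_t curTickEst = std::max(busyTicks, startingTicks + cyclesExecuted);
    printf("estimate while running: %llu\n", (unsigned long long)curTickEst);  // 5500

    curTickEst = 0;  // "Since the event is in CoreTiming, we're in sync."
    printf("estimate when idle: %llu\n", (unsigned long long)curTickEst);
    return 0;
}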


@@ -127,10 +127,6 @@ public:
     // Note: Not virtual!
     inline void Flush();
 
-    u64 GetTickEstimate() override {
-        return curTickEst_;
-    }
-
 #ifdef USE_CRT_DBG
 #undef new
 #endif
@@ -290,8 +286,6 @@ protected:
     GEPrimitiveType lastPrim_;
 
 private:
-    u64 curTickEst_;
-
     // Debug stats.
     double timeSteppingStarted_;
     double timeSpentStepping_;


@@ -223,7 +223,6 @@ public:
     virtual void DeviceLost() = 0;
     virtual void DeviceRestore() = 0;
     virtual void ReapplyGfxState() = 0;
-    virtual u64 GetTickEstimate() = 0;
     virtual void DoState(PointerWrap &p) = 0;
 
     // Called by the window system if the window size changed. This will be reflected in PSPCoreParam.pixel*.