Recalculate numWaitThreads all the time.

Previously, the count could go negative when a thread was woken a second
time (e.g. because of a delete) before it had actually been scheduled and
woken up.
Unknown W. Brackets 2013-05-27 22:44:10 -07:00
parent 40877cf813
commit 77b0688d92
4 changed files with 58 additions and 35 deletions
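The fix replaces counter bookkeeping (incrementing and decrementing numWaitThreads at every wait, wake, timeout, and callback site) with recalculation from the list of waiting threads whenever the value is needed. A minimal sketch of that idea, using simplified stand-in types rather than the actual PPSSPP structures:

```cpp
#include <vector>

// Simplified, hypothetical stand-in for a kernel wait object.
struct WaitObject {
	std::vector<int> waitingThreads;  // IDs of threads blocked on this object

	// Before: a cached counter, ++ on wait and -- on wake. If a thread is
	// woken twice (e.g. a timeout racing a delete) before it runs, the
	// counter is decremented twice and goes negative.
	//
	// After: recompute from the list on demand, so the count can never
	// drift out of sync with the actual waiters.
	int NumWaitThreads() const {
		return (int)waitingThreads.size();
	}
};
```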

View File

@@ -160,8 +160,6 @@ bool __KernelUnlockEventFlagForThread(EventFlag *e, EventFlagTh &th, u32 &error,
{
if (!__KernelEventFlagMatches(&e->nef.currentPattern, th.bits, th.wait, th.outAddr))
return false;
- e->nef.numWaitThreads--;
}
else
{
@@ -219,7 +217,6 @@ void __KernelEventFlagBeginCallback(SceUID threadID, SceUID prevCallbackId)
waitData = *t;
// TODO: Hmm, what about priority/fifo order? Does it lose its place in line?
flag->waitingThreads.erase(flag->waitingThreads.begin() + i);
- flag->nef.numWaitThreads--;
break;
}
}
@@ -267,7 +264,6 @@ void __KernelEventFlagEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &r
EventFlagTh waitData = flag->pausedWaits[pauseKey];
u64 waitDeadline = waitData.pausedTimeout;
flag->pausedWaits.erase(pauseKey);
- flag->nef.numWaitThreads++;
// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?
@@ -342,11 +338,11 @@ u32 sceKernelCancelEventFlag(SceUID uid, u32 pattern, u32 numWaitThreadsPtr)
EventFlag *e = kernelObjects.Get<EventFlag>(uid, error);
if (e)
{
+ e->nef.numWaitThreads = (int) e->waitingThreads.size();
if (Memory::IsValidAddress(numWaitThreadsPtr))
Memory::Write_U32(e->nef.numWaitThreads, numWaitThreadsPtr);
e->nef.currentPattern = pattern;
- e->nef.numWaitThreads = 0;
if (__KernelClearEventFlagThreads(e, SCE_KERNEL_ERROR_WAIT_CANCEL))
hleReSchedule("event flag canceled");
@@ -451,7 +447,6 @@ void __KernelEventFlagTimeout(u64 userdata, int cycleslate)
// actually running, it will get a DELETE result instead of a TIMEOUT.
// So, we need to remember it or we won't be able to mark it DELETE instead later.
__KernelUnlockEventFlagForThread(e, *t, error, SCE_KERNEL_ERROR_WAIT_TIMEOUT, wokeThreads);
- e->nef.numWaitThreads--;
break;
}
}
@@ -520,11 +515,10 @@ int sceKernelWaitEventFlag(SceUID id, u32 bits, u32 wait, u32 outBitsPtr, u32 ti
timeout = Memory::Read_U32(timeoutPtr);
// Do we allow more than one thread to wait?
- if (e->nef.numWaitThreads > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0)
+ if (e->waitingThreads.size() > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0)
return SCE_KERNEL_ERROR_EVF_MULTI;
// No match - must wait.
- e->nef.numWaitThreads++;
th.tid = __KernelGetCurThread();
th.bits = bits;
th.wait = wait;
@@ -584,11 +578,10 @@ int sceKernelWaitEventFlagCB(SceUID id, u32 bits, u32 wait, u32 outBitsPtr, u32
timeout = Memory::Read_U32(timeoutPtr);
// Do we allow more than one thread to wait?
- if (e->nef.numWaitThreads > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0)
+ if (e->waitingThreads.size() > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0)
return SCE_KERNEL_ERROR_EVF_MULTI;
// No match - must wait.
- e->nef.numWaitThreads++;
th.tid = __KernelGetCurThread();
th.bits = bits;
th.wait = wait;
@@ -641,7 +634,7 @@ int sceKernelPollEventFlag(SceUID id, u32 bits, u32 wait, u32 outBitsPtr, u32 ti
if (Memory::IsValidAddress(outBitsPtr))
Memory::Write_U32(e->nef.currentPattern, outBitsPtr);
- if (e->nef.numWaitThreads > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0)
+ if (e->waitingThreads.size() > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0)
return SCE_KERNEL_ERROR_EVF_MULTI;
// No match - return that, this is polling, not waiting.
@@ -670,6 +663,16 @@ u32 sceKernelReferEventFlagStatus(SceUID id, u32 statusPtr)
if (!Memory::IsValidAddress(statusPtr))
return -1;
+ u32 error;
+ for (auto iter = e->waitingThreads.begin(); iter != e->waitingThreads.end(); ++iter)
+ {
+ SceUID waitID = __KernelGetWaitID(iter->tid, WAITTYPE_EVENTFLAG, error);
+ // The thread is no longer waiting for this, clean it up.
+ if (waitID != id)
+ e->waitingThreads.erase(iter--);
+ }
+ e->nef.numWaitThreads = (int) e->waitingThreads.size();
if (Memory::Read_U32(statusPtr) != 0)
Memory::WriteStruct(statusPtr, &e->nef);
return 0;
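The cleanup loop added to sceKernelReferEventFlagStatus above drops entries whose current wait is no longer this event flag before recomputing the count. It erases via erase(iter--); for reference, a sketch of the same pruning written with erase's returned iterator, which sidesteps the decrement-at-begin edge case (assuming waitingThreads is a std::vector, and reusing the names from the diff):

```cpp
// Hypothetical helper; EventFlag, EventFlagTh, __KernelGetWaitID and
// WAITTYPE_EVENTFLAG are the names used in the diff above.
static void PruneStaleEventFlagWaiters(EventFlag *e, SceUID id) {
	u32 error;
	for (auto iter = e->waitingThreads.begin(); iter != e->waitingThreads.end(); ) {
		SceUID waitID = __KernelGetWaitID(iter->tid, WAITTYPE_EVENTFLAG, error);
		if (waitID != id)
			iter = e->waitingThreads.erase(iter);  // no longer waiting on this flag
		else
			++iter;
	}
	// With the list pruned, the recalculated count is trustworthy.
	e->nef.numWaitThreads = (int)e->waitingThreads.size();
}
```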

View File

@@ -890,8 +890,6 @@ bool __KernelUnlockVplForThread(VPL *vpl, VplWaitingThread &threadInfo, u32 &err
Memory::Write_U32(addr, threadInfo.addrPtr);
else
return false;
- vpl->nv.numWaitThreads--;
}
if (timeoutPtr != 0 && vplWaitTimer != -1)
@@ -1073,8 +1071,6 @@ void __KernelVplTimeout(u64 userdata, int cyclesLate)
// The reason is, if it times out, but what it was waiting on is DELETED prior to it
// actually running, it will get a DELETE result instead of a TIMEOUT.
// So, we need to remember it or we won't be able to mark it DELETE instead later.
- vpl->nv.numWaitThreads--;
__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
}
@@ -1108,8 +1104,6 @@ int sceKernelAllocateVpl(SceUID uid, u32 size, u32 addrPtr, u32 timeoutPtr)
{
if (vpl)
{
- vpl->nv.numWaitThreads++;
SceUID threadID = __KernelGetCurThread();
__KernelVplRemoveThread(vpl, threadID);
VplWaitingThread waiting = {threadID, addrPtr};
@@ -1209,9 +1203,9 @@ int sceKernelCancelVpl(SceUID uid, u32 numWaitThreadsPtr)
VPL *vpl = kernelObjects.Get<VPL>(uid, error);
if (vpl)
{
+ vpl->nv.numWaitThreads = (int) vpl->waitingThreads.size();
if (Memory::IsValidAddress(numWaitThreadsPtr))
Memory::Write_U32(vpl->nv.numWaitThreads, numWaitThreadsPtr);
- vpl->nv.numWaitThreads = 0;
bool wokeThreads = __KernelClearVplThreads(vpl, SCE_KERNEL_ERROR_WAIT_CANCEL);
if (wokeThreads)
@@ -1230,6 +1224,17 @@ int sceKernelReferVplStatus(SceUID uid, u32 infoPtr)
if (vpl)
{
DEBUG_LOG(HLE, "sceKernelReferVplStatus(%i, %08x)", uid, infoPtr);
+ u32 error;
+ for (auto iter = vpl->waitingThreads.begin(); iter != vpl->waitingThreads.end(); ++iter)
+ {
+ SceUID waitID = __KernelGetWaitID(iter->threadID, WAITTYPE_VPL, error);
+ // The thread is no longer waiting for this, clean it up.
+ if (waitID != uid)
+ vpl->waitingThreads.erase(iter--);
+ }
+ vpl->nv.numWaitThreads = (int) vpl->waitingThreads.size();
vpl->nv.freeSize = vpl->alloc.GetTotalFreeBytes();
if (Memory::IsValidAddress(infoPtr) && Memory::Read_U32(infoPtr))
Memory::WriteStruct(infoPtr, &vpl->nv);
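In sceKernelCancelVpl above, the count is recalculated from waitingThreads and reported to the caller before the waiters are released; the old explicit numWaitThreads = 0 becomes unnecessary because waking the waiters empties the list, so the next recalculation yields zero on its own. A condensed sketch of that ordering, reusing the names from the diff:

```cpp
// Sketch only; VPL, Memory, __KernelClearVplThreads and hleReSchedule are
// the names appearing in the diff above.
void CancelVplSketch(VPL *vpl, u32 numWaitThreadsPtr) {
	// Snapshot the count for the caller while the waiters are still queued.
	vpl->nv.numWaitThreads = (int)vpl->waitingThreads.size();
	if (Memory::IsValidAddress(numWaitThreadsPtr))
		Memory::Write_U32(vpl->nv.numWaitThreads, numWaitThreadsPtr);

	// Releasing everyone with WAIT_CANCEL empties waitingThreads, so the
	// next recalculation naturally reports zero; no manual reset needed.
	if (__KernelClearVplThreads(vpl, SCE_KERNEL_ERROR_WAIT_CANCEL))
		hleReSchedule("vpl canceled");
}
```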

View File

@@ -692,7 +692,15 @@ int sceKernelReferMutexStatus(SceUID id, u32 infoAddr)
// Don't write if the size is 0. Anything else is A-OK, though, apparently.
if (Memory::Read_U32(infoAddr) != 0)
{
// Refresh and write
+ u32 error;
+ for (auto iter = m->waitingThreads.begin(); iter != m->waitingThreads.end(); ++iter)
+ {
+ SceUID waitID = __KernelGetWaitID(*iter, WAITTYPE_MUTEX, error);
+ // The thread is no longer waiting for this, clean it up.
+ if (waitID != id)
+ m->waitingThreads.erase(iter--);
+ }
m->nm.numWaitThreads = (int) m->waitingThreads.size();
Memory::WriteStruct(infoAddr, &m->nm);
}
@@ -1144,6 +1152,15 @@ int __KernelReferLwMutexStatus(SceUID uid, u32 infoPtr)
{
auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(m->nm.workareaPtr);
+ u32 error;
+ for (auto iter = m->waitingThreads.begin(); iter != m->waitingThreads.end(); ++iter)
+ {
+ SceUID waitID = __KernelGetWaitID(*iter, WAITTYPE_LWMUTEX, error);
+ // The thread is no longer waiting for this, clean it up.
+ if (waitID != uid)
+ m->waitingThreads.erase(iter--);
+ }
// Refresh and write
m->nm.currentCount = workarea->lockLevel;
m->nm.lockThread = workarea->lockThread == 0 ? -1 : workarea->lockThread;
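The mutex variants above store bare SceUIDs in waitingThreads, so the same pruning could also be written with the erase-remove idiom; a sketch assuming std::vector<SceUID> and a C++11 lambda, with m, WAITTYPE_MUTEX and __KernelGetWaitID following the names in the diff:

```cpp
#include <algorithm>

// Hypothetical equivalent of the loops above, using erase-remove.
static void PruneStaleMutexWaiters(Mutex *m, SceUID id) {
	u32 error;
	m->waitingThreads.erase(
		std::remove_if(m->waitingThreads.begin(), m->waitingThreads.end(),
			[&](SceUID tid) {
				// Drop threads whose current wait is no longer this mutex.
				return __KernelGetWaitID(tid, WAITTYPE_MUTEX, error) != id;
			}),
		m->waitingThreads.end());
	m->nm.numWaitThreads = (int)m->waitingThreads.size();
}
```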

View File

@@ -115,7 +115,6 @@ bool __KernelUnlockSemaForThread(Semaphore *s, SceUID threadID, u32 &error, int
return false;
s->ns.currentCount -= wVal;
- s->ns.numWaitThreads--;
}
if (timeoutPtr != 0 && semaWaitTimer != -1)
@@ -157,7 +156,6 @@ void __KernelSemaBeginCallback(SceUID threadID, SceUID prevCallbackId)
// TODO: Hmm, what about priority/fifo order? Does it lose its place in line?
s->waitingThreads.erase(std::remove(s->waitingThreads.begin(), s->waitingThreads.end(), threadID), s->waitingThreads.end());
- s->ns.numWaitThreads--;
DEBUG_LOG(HLE, "sceKernelWaitSemaCB: Suspending sema wait for callback");
}
@@ -188,7 +186,6 @@ void __KernelSemaEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &return
u64 waitDeadline = s->pausedWaitTimeouts[pauseKey];
s->pausedWaitTimeouts.erase(pauseKey);
- s->ns.numWaitThreads++;
// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?
@@ -205,7 +202,6 @@ void __KernelSemaEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &return
Memory::Write_U32(0, timeoutPtr);
__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
- s->ns.numWaitThreads--;
}
else
{
@@ -244,6 +240,7 @@ int sceKernelCancelSema(SceUID id, int newCount, u32 numWaitThreadsPtr)
if (newCount > s->ns.maxCount)
return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
+ s->ns.numWaitThreads = (int) s->waitingThreads.size();
if (Memory::IsValidAddress(numWaitThreadsPtr))
Memory::Write_U32(s->ns.numWaitThreads, numWaitThreadsPtr);
@@ -251,7 +248,6 @@ int sceKernelCancelSema(SceUID id, int newCount, u32 numWaitThreadsPtr)
s->ns.currentCount = s->ns.initCount;
else
s->ns.currentCount = newCount;
- s->ns.numWaitThreads = 0;
if (__KernelClearSemaThreads(s, SCE_KERNEL_ERROR_WAIT_CANCEL))
hleReSchedule("semaphore canceled");
@@ -336,6 +332,16 @@ int sceKernelReferSemaStatus(SceUID id, u32 infoPtr)
if (!Memory::IsValidAddress(infoPtr))
return -1;
+ u32 error;
+ for (auto iter = s->waitingThreads.begin(); iter != s->waitingThreads.end(); ++iter)
+ {
+ SceUID waitID = __KernelGetWaitID(*iter, WAITTYPE_SEMA, error);
+ // The thread is no longer waiting for this, clean it up.
+ if (waitID != id)
+ s->waitingThreads.erase(iter--);
+ }
+ s->ns.numWaitThreads = (int) s->waitingThreads.size();
if (Memory::Read_U32(infoPtr) != 0)
Memory::WriteStruct(infoPtr, &s->ns);
return 0;
@@ -353,7 +359,7 @@ int sceKernelSignalSema(SceUID id, int signal)
Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
if (s)
{
- if (s->ns.currentCount + signal - s->ns.numWaitThreads > s->ns.maxCount)
+ if (s->ns.currentCount + signal - (int) s->waitingThreads.size() > s->ns.maxCount)
return SCE_KERNEL_ERROR_SEMA_OVF;
int oldval = s->ns.currentCount;
@@ -403,8 +409,6 @@ void __KernelSemaTimeout(u64 userdata, int cycleslate)
// The reason is, if it times out, but what it was waiting on is DELETED prior to it
// actually running, it will get a DELETE result instead of a TIMEOUT.
// So, we need to remember it or we won't be able to mark it DELETE instead later.
- s->ns.numWaitThreads--;
__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
}
@@ -437,14 +441,10 @@ int __KernelWaitSema(SceUID id, int wantedCount, u32 timeoutPtr, const char *bad
// If there are any callbacks, we always wait, and wake after the callbacks.
bool hasCallbacks = processCallbacks && __KernelCurHasReadyCallbacks();
- if (s->ns.currentCount >= wantedCount && s->ns.numWaitThreads == 0 && !hasCallbacks)
+ if (s->ns.currentCount >= wantedCount && s->waitingThreads.size() == 0 && !hasCallbacks)
{
if (hasCallbacks)
{
- // __KernelSemaBeginCallback() will decrement this, so increment it here.
- // TODO: Clean this up a bit better.
- s->ns.numWaitThreads++;
// Might actually end up having to wait, so set the timeout.
__KernelSetSemaTimeout(s, timeoutPtr);
__KernelWaitCallbacksCurThread(WAITTYPE_SEMA, id, wantedCount, timeoutPtr);
@@ -454,8 +454,6 @@ int __KernelWaitSema(SceUID id, int wantedCount, u32 timeoutPtr, const char *bad
}
else
{
- s->ns.numWaitThreads++;
SceUID threadID = __KernelGetCurThread();
// May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates.
if (std::find(s->waitingThreads.begin(), s->waitingThreads.end(), threadID) == s->waitingThreads.end())
@@ -499,7 +497,7 @@ int sceKernelPollSema(SceUID id, int wantedCount)
Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
if (s)
{
- if (s->ns.currentCount >= wantedCount && s->ns.numWaitThreads == 0)
+ if (s->ns.currentCount >= wantedCount && s->waitingThreads.size() == 0)
{
s->ns.currentCount -= wantedCount;
return 0;