Merge pull request #70 from unknownbrackets/mutexes

Mutexes
This commit is contained in:
Henrik Rydgård 2012-11-21 01:48:44 -08:00
commit 3934876908
12 changed files with 1064 additions and 203 deletions

View File

@ -249,6 +249,51 @@ void ScheduleEvent(int cyclesIntoFuture, int event_type, u64 userdata)
AddEventToQueue(ne);
}
// Returns cycles left in timer.
int UnscheduleEvent(int event_type, u64 userdata)
{
int result = 0;
if (!first)
return result;
while(first)
{
if (first->type == event_type && first->userdata == userdata)
{
result = (int)(first->time - globalTimer);
Event *next = first->next;
FreeEvent(first);
first = next;
}
else
{
break;
}
}
if (!first)
return result;
Event *prev = first;
Event *ptr = prev->next;
while (ptr)
{
if (ptr->type == event_type && ptr->userdata == userdata)
{
result = (int)(ptr->time - globalTimer);
prev->next = ptr->next;
FreeEvent(ptr);
ptr = prev->next;
}
else
{
prev = ptr;
ptr = ptr->next;
}
}
return result;
}
void RegisterAdvanceCallback(void (*callback)(int cyclesExecuted))
{
advanceCallback = callback;

View File

@ -58,6 +58,10 @@ inline int usToCycles(int us) {
return (int)(CPU_HZ / 1000000 * us);
}
// Converts CPU cycles back to microseconds.
// Integer division: any remainder below one microsecond is truncated.
inline int cyclesToUs(int cycles) {
return cycles / (CPU_HZ / 1000000);
}
namespace CoreTiming
{
void Init();
@ -77,8 +81,8 @@ namespace CoreTiming
void ScheduleEvent(int cyclesIntoFuture, int event_type, u64 userdata=0);
void ScheduleEvent_Threadsafe(int cyclesIntoFuture, int event_type, u64 userdata=0);
void ScheduleEvent_Threadsafe_Immediate(int event_type, u64 userdata=0);
int UnscheduleEvent(int event_type, u64 userdata);
// We only permit one event of each type in the queue at a time.
void RemoveEvent(int event_type);
void RemoveThreadsafeEvent(int event_type);
void RemoveAllEvents(int event_type);

View File

@ -108,6 +108,10 @@ template<void func(int, u32)> void WrapV_IU() {
func(PARAM(0), PARAM(1));
}
// HLE wrapper: calls a void func(u32, int) with the first two guest arguments; no return value.
template<void func(u32, int)> void WrapV_UI() {
func(PARAM(0), PARAM(1));
}
template<int func(const char *, u32)> void WrapI_CU() {
int retval = func(Memory::GetCharPointer(PARAM(0)), PARAM(1));
RETURN(retval);
@ -146,6 +150,10 @@ template<void func(int, int, u32)> void WrapV_IIU() {
func(PARAM(0), PARAM(1), PARAM(2));
}
// HLE wrapper: calls a void func(u32, int, u32) with the first three guest arguments; no return value.
template<void func(u32, int, u32)> void WrapV_UIU() {
func(PARAM(0), PARAM(1), PARAM(2));
}
template<void func(int, u32, u32, u32, u32)> void WrapV_IUUUU() {
func(PARAM(0), PARAM(1), PARAM(2), PARAM(3), PARAM(4));
}
@ -158,6 +166,14 @@ template<void func(u32, u32, u32, u32)> void WrapV_UUUU() {
func(PARAM(0), PARAM(1), PARAM(2), PARAM(3));
}
// HLE wrapper: first guest argument is a PSP address translated to a host
// char* via Memory::GetCharPointer; remaining three are passed through.
template<void func(const char *, u32, int, u32)> void WrapV_CUIU() {
func(Memory::GetCharPointer(PARAM(0)), PARAM(1), PARAM(2), PARAM(3));
}
// HLE wrapper: second guest argument is a PSP address translated to a host
// char* via Memory::GetCharPointer; the other four are passed through.
template<void func(u32, const char *, u32, int, u32)> void WrapV_UCUIU() {
func(PARAM(0), Memory::GetCharPointer(PARAM(1)), PARAM(2), PARAM(3), PARAM(4));
}
// HLE wrapper: first guest argument is a PSP address translated to a host
// char* via Memory::GetCharPointer; remaining four are passed through.
template<void func(const char *, u32, int, int, u32)> void WrapV_CUIIU() {
func(Memory::GetCharPointer(PARAM(0)), PARAM(1), PARAM(2), PARAM(3), PARAM(4));
}

View File

@ -337,14 +337,14 @@ const HLEFunction ThreadManForUser[] =
{0x4E3A1105,&WrapV_IIU<sceKernelWaitSema>, "sceKernelWaitSema"},
{0x6d212bac,&WrapV_IIU<sceKernelWaitSemaCB>, "sceKernelWaitSemaCB"},
{0x60107536,0,"sceKernelDeleteLwMutex"},
{0x19CFF145,0,"sceKernelCreateLwMutex"},
{0xf8170fbe,&WrapU_U<sceKernelDeleteMutex>,"sceKernelDeleteMutex"},
{0xB011B11F,&WrapU_UUU<sceKernelLockMutex>,"sceKernelLockMutex"},
{0x5bf4dd27,&WrapU_UUU<sceKernelLockMutexCB>,"sceKernelLockMutexCB"},
{0x6b30100f,&WrapU_UU<sceKernelUnlockMutex>,"sceKernelUnlockMutex"},
{0xb7d098c6,&WrapU_CUU<sceKernelCreateMutex>,"sceKernelCreateMutex"},
{0x0DDCD2C9, 0, "sceKernelTryLockMutex"},
{0x60107536,&WrapV_U<sceKernelDeleteLwMutex>, "sceKernelDeleteLwMutex"},
{0x19CFF145,&WrapV_UCUIU<sceKernelCreateLwMutex>, "sceKernelCreateLwMutex"},
{0xf8170fbe,&WrapV_I<sceKernelDeleteMutex>, "sceKernelDeleteMutex"},
{0xB011B11F,&WrapV_IIU<sceKernelLockMutex>, "sceKernelLockMutex"},
{0x5bf4dd27,&WrapV_IIU<sceKernelLockMutexCB>, "sceKernelLockMutexCB"},
{0x6b30100f,&WrapV_II<sceKernelUnlockMutex>, "sceKernelUnlockMutex"},
{0xb7d098c6,&WrapV_CUIU<sceKernelCreateMutex>, "sceKernelCreateMutex"},
{0x0DDCD2C9,&WrapV_II<sceKernelTryLockMutex>, "sceKernelTryLockMutex"},
// NOTE: LockLwMutex and UnlockLwMutex are in Kernel_Library, see sceKernelInterrupt.cpp.
{0xFCCFAD26,sceKernelCancelWakeupThread,"sceKernelCancelWakeupThread"},
@ -371,7 +371,7 @@ const HLEFunction ThreadManForUser[] =
{0x912354a7,sceKernelRotateThreadReadyQueue,"sceKernelRotateThreadReadyQueue"},
{0x9ACE131E,sceKernelSleepThread,"sceKernelSleepThread"},
{0x82826f70,sceKernelSleepThreadCB,"sceKernelSleepThreadCB"},
{0xF475845D,&WrapU_V<sceKernelStartThread>,"sceKernelStartThread"},
{0xF475845D,&WrapV_IUU<sceKernelStartThread>,"sceKernelStartThread"},
{0x9944f31f,sceKernelSuspendThread,"sceKernelSuspendThread"},
{0x616403ba,0,"sceKernelTerminateThread"},
{0x383f7bcc,sceKernelTerminateDeleteThread,"sceKernelTerminateDeleteThread"},

View File

@ -26,6 +26,7 @@
#include "sceKernel.h"
#include "sceKernelThread.h"
#include "sceKernelInterrupt.h"
#include "sceKernelMutex.h"
struct Interrupt
{
@ -428,11 +429,11 @@ const HLEFunction Kernel_Library[] =
{0x47a0b729,sceKernelIsCpuIntrSuspended, "sceKernelIsCpuIntrSuspended"}, //flags
{0xb55249d2,sceKernelIsCpuIntrEnable, "sceKernelIsCpuIntrEnable"},
{0xa089eca4,sceKernelMemset, "sceKernelMemset"},
{0xDC692EE3,0, "sceKernelTryLockLwMutex"},
{0x37431849,0, "sceKernelTryLockLwMutex_600"},
{0xbea46419,0, "sceKernelLockLwMutex"},
{0x1FC64E09,0, "sceKernelLockLwMutexCB"},
{0x15b6446b,0, "sceKernelUnlockLwMutex"},
{0xDC692EE3,&WrapV_UI<sceKernelTryLockLwMutex>, "sceKernelTryLockLwMutex"},
{0x37431849,&WrapV_UI<sceKernelTryLockLwMutex_600>, "sceKernelTryLockLwMutex_600"},
{0xbea46419,&WrapV_UIU<sceKernelLockLwMutex>, "sceKernelLockLwMutex"},
{0x1FC64E09,&WrapV_UIU<sceKernelLockLwMutexCB>, "sceKernelLockLwMutexCB"},
{0x15b6446b,&WrapV_UI<sceKernelUnlockLwMutex>, "sceKernelUnlockLwMutex"},
{0x293b45b8,sceKernelGetThreadId, "sceKernelGetThreadId"},
{0x1839852A,0,"sce_paf_private_memcpy"},
{0xA089ECA4,0,"sce_paf_private_memset"},

View File

@ -17,8 +17,11 @@
// UNFINISHED
#include <algorithm>
#include <map>
#include "HLE.h"
#include "../MIPS/MIPS.h"
#include "../../Core/CoreTiming.h"
#include "sceKernel.h"
#include "sceKernelMutex.h"
#include "sceKernelThread.h"
@ -28,8 +31,20 @@
#define PSP_MUTEX_ATTR_ALLOW_RECURSIVE 0x200
// Not sure about the names of these
#define PSP_MUTEX_ERROR_NOT_LOCKED 0x800201C7
#define PSP_MUTEX_ERROR_NO_SUCH_MUTEX 0x800201C3
#define PSP_MUTEX_ERROR_TRYLOCK_FAILED 0x800201C4
#define PSP_MUTEX_ERROR_NOT_LOCKED 0x800201C5
#define PSP_MUTEX_ERROR_LOCK_OVERFLOW 0x800201C6
#define PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW 0x800201C7
#define PSP_MUTEX_ERROR_ALREADY_LOCKED 0x800201C8
#define PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX 0x800201CA
// Note: used only for _600.
#define PSP_LWMUTEX_ERROR_TRYLOCK_FAILED 0x800201CB
#define PSP_LWMUTEX_ERROR_NOT_LOCKED 0x800201CC
#define PSP_LWMUTEX_ERROR_LOCK_OVERFLOW 0x800201CD
#define PSP_LWMUTEX_ERROR_UNLOCK_UNDERFLOW 0x800201CE
#define PSP_LWMUTEX_ERROR_ALREADY_LOCKED 0x800201CF
// Guesswork - not exposed anyway
struct NativeMutex
@ -46,139 +61,753 @@ struct Mutex : public KernelObject
{
const char *GetName() {return nm.name;}
const char *GetTypeName() {return "Mutex";}
static u32 GetMissingErrorCode() { return SCE_KERNEL_ERROR_UNKNOWN_SEMID; } // Not sure?
static u32 GetMissingErrorCode() { return PSP_MUTEX_ERROR_NO_SUCH_MUTEX; }
int GetIDType() const { return SCE_KERNEL_TMID_Mutex; }
NativeMutex nm;
std::vector<SceUID> waitingThreads;
};
// Kernel object wrapper for a lightweight mutex.
// NOTE(review): GetMissingErrorCode reuses the semaphore "unknown ID" error —
// the "Not sure?" comment suggests this is a placeholder; confirm the real code.
struct LWMutex : public KernelObject
{
const char *GetName() {return nm.name;}
const char *GetTypeName() {return "LWMutex";}
static u32 GetMissingErrorCode() { return SCE_KERNEL_ERROR_UNKNOWN_SEMID; } // Not sure?
int GetIDType() const { return SCE_KERNEL_TMID_LwMutex; }
NativeMutex nm;
// Threads currently blocked waiting for this lwmutex.
std::vector<SceUID> waitingThreads;
};
// Creates a mutex kernel object (initially unlocked) and returns its UID.
// `options` is accepted but not used here.
u32 sceKernelCreateMutex(const char *name, u32 attr, u32 options)
{
	DEBUG_LOG(HLE,"sceKernelCreateMutex(%s, %08x, %08x)", name, attr, options);

	Mutex *mutex = new Mutex();
	SceUID id = kernelObjects.Create(mutex);

	// NOTE(review): sizeof(mutex) is the size of a pointer, not of the native
	// struct — presumably sizeof(NativeMutex) was intended; confirm.
	mutex->nm.size = sizeof(mutex);
	mutex->nm.attr = attr;
	mutex->nm.lockLevel = 0;
	mutex->nm.lockThread = -1;
	// Fix: strncpy(dst, src, 32) leaves nm.name unterminated for names of 32+
	// chars. Copy at most 31 and terminate explicitly, matching the other
	// create paths in this file.
	strncpy(mutex->nm.name, name, 31);
	mutex->nm.name[31] = 0;

	return id;
}
// Destroys the mutex object with the given ID.
// Returns 0 on success, or PSP_MUTEX_ERROR_NO_SUCH_MUTEX for an unknown ID.
u32 sceKernelDeleteMutex(u32 id)
{
	DEBUG_LOG(HLE,"sceKernelDeleteMutex(%i)", id);

	u32 error;
	if (kernelObjects.Get<Mutex>(id, error) == NULL)
		return PSP_MUTEX_ERROR_NO_SUCH_MUTEX;

	kernelObjects.Destroy<Mutex>(id);
	return 0;
}
// Locks the mutex, incrementing its lock level by `count`.
// Incomplete: the contended case does not block — it logs and falls through to
// return 0 — and `timeoutPtr` is never read.
u32 sceKernelLockMutex(u32 id, u32 count, u32 timeoutPtr)
{
DEBUG_LOG(HLE,"sceKernelLockMutex(%i, %i, %08x)", id, count, timeoutPtr);
u32 error;
Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
if (!mutex)
return PSP_MUTEX_ERROR_NO_SUCH_MUTEX;
if (mutex->nm.lockLevel == 0)
{
mutex->nm.lockLevel += count;
mutex->nm.lockThread = __KernelGetCurThread();
// Nobody had it locked - no need to block
}
else if ((mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) && mutex->nm.lockThread == __KernelGetCurThread())
{
// Recursive mutex, let's just increase the lock count and keep going
mutex->nm.lockLevel += count;
}
else
{
// Yeah, we need to block. Somehow.
ERROR_LOG(HLE,"Mutex should block!");
}
// Returns 0 even on the should-block path above.
return 0;
}
// Stub: callback-aware mutex lock is not implemented; logs and reports success.
u32 sceKernelLockMutexCB(u32 id, u32 count, u32 timeoutPtr)
{
ERROR_LOG(HLE,"UNIMPL sceKernelLockMutexCB(%i, %i, %08x)", id, count, timeoutPtr);
return 0;
}
// Unlocks the mutex by `count` levels.
// Unfinished per the log text: no ownership check, no underflow check
// (lockLevel can go negative if count > lockLevel), and no waiter wakeup.
u32 sceKernelUnlockMutex(u32 id, u32 count)
{
DEBUG_LOG(HLE,"UNFINISHED sceKernelUnlockMutex(%i, %i)", id, count);
u32 error;
Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
if (!mutex)
return PSP_MUTEX_ERROR_NO_SUCH_MUTEX;
if (mutex->nm.lockLevel == 0)
return PSP_MUTEX_ERROR_NOT_LOCKED;
mutex->nm.lockLevel -= count;
// TODO....
return 0;
}
// Guesswork - not exposed anyway
// Guessed layout of the native lwmutex record (per the "Guesswork" note above).
struct NativeLwMutex
{
SceSize size;
char name[32];
SceUInt attr;
SceUID mutexUid;
// Presumably the guest address of the workarea — TODO confirm vs workareaPtr below.
SceUInt opaqueWorkAreaAddr;
int numWaitThreads;
int locked;
int threadid; // thread holding the lock
SceUInt workareaPtr;
};
void sceKernelCreateLwMutex()
// Guest-visible lwmutex workarea, read/written in PSP memory via
// Memory::ReadStruct / WriteStruct. Layout must stay 8 ints (32 bytes).
// (Cleaned: stray leftover lines from the deleted sceKernelCreateLwMutex stub
// had been interleaved into this struct's body.)
struct NativeLwMutexWorkarea
{
	int lockLevel;
	SceUID lockThread;
	int attr;
	int numWaitThreads;
	SceUID uid;
	int pad[3];

	// Zero the whole workarea (fresh create).
	void init()
	{
		memset(this, 0, sizeof(NativeLwMutexWorkarea));
	}

	// Mark the workarea as pointing at no kernel object (after delete).
	void clear()
	{
		lockLevel = 0;
		lockThread = -1;
		uid = -1;
	}
};
// Kernel object backing an lwmutex; the authoritative lock state lives in the
// guest-memory workarea, this object tracks identity and waiters.
struct LwMutex : public KernelObject
{
const char *GetName() {return nm.name;}
const char *GetTypeName() {return "LwMutex";}
static u32 GetMissingErrorCode() { return PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX; }
int GetIDType() const { return SCE_KERNEL_TMID_LwMutex; }
NativeLwMutex nm;
// Threads currently blocked waiting for this lwmutex.
std::vector<SceUID> waitingThreads;
};
// Set once __KernelMutexInit() has run (lazily triggered from the create calls).
bool mutexInitComplete = false;
// CoreTiming event type IDs for lock timeouts; 0 until registered.
int mutexWaitTimer = 0;
int lwMutexWaitTimer = 0;
// Thread -> Mutex locks for thread end.
std::map<SceUID, SceUID> mutexHeldLocks;
// One-time setup: registers the timeout events and the thread-end hook used to
// release locks held by dying threads.
void __KernelMutexInit()
{
mutexWaitTimer = CoreTiming::RegisterEvent("MutexTimeout", &__KernelMutexTimeout);
lwMutexWaitTimer = CoreTiming::RegisterEvent("LwMutexTimeout", &__KernelLwMutexTimeout);
// TODO: Install on first mutex (if it's slow?)
__KernelListenThreadEnd(&__KernelMutexThreadEnd);
mutexInitComplete = true;
}
void sceKernelDeleteLwMutex()
// Records that `thread` now holds `mutex` at lock level `count`, and registers
// the lock in mutexHeldLocks so it can be released when the thread ends.
void __KernelMutexAcquireLock(Mutex *mutex, int count, SceUID thread)
{
	// Fix: the assert referenced an undeclared `threadID` (the parameter is
	// `thread`) and its message used %d with no argument.
	_dbg_assert_msg_(HLE, mutexHeldLocks.find(thread) == mutexHeldLocks.end(), "Thread wasn't removed from mutexHeldLocks properly.");
	mutexHeldLocks.insert(std::make_pair(thread, mutex->GetUID()));

	mutex->nm.lockLevel = count;
	mutex->nm.lockThread = thread;
}
void sceKernelTryLockLwMutex()
// Convenience overload: acquire the lock for the currently running thread.
// (Cleaned: stray leftover lines from the deleted sceKernelTryLockLwMutex stub
// had been interleaved into this body.)
void __KernelMutexAcquireLock(Mutex *mutex, int count)
{
	__KernelMutexAcquireLock(mutex, count, __KernelGetCurThread());
}
void sceKernelLockLwMutex()
// Forgets the holder of `mutex`: drops its mutexHeldLocks entry (if any) and
// clears lockThread. (Cleaned: stray leftover lines from the deleted
// sceKernelLockLwMutex stub had been interleaved into this body.)
void __KernelMutexEraseLock(Mutex *mutex)
{
	if (mutex->nm.lockThread != -1)
		mutexHeldLocks.erase(mutex->nm.lockThread);
	mutex->nm.lockThread = -1;
}
void sceKernelLockLwMutexCB()
void sceKernelCreateMutex(const char *name, u32 attr, int initialCount, u32 optionsPtr)
{
ERROR_LOG(HLE,"UNIMPL sceKernelLockLwMutexCB()");
RETURN(0);
if (!mutexInitComplete)
__KernelMutexInit();
u32 error = 0;
if (!name)
error = SCE_KERNEL_ERROR_ERROR;
else if (initialCount < 0)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
else if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
if (error)
{
RETURN(error);
return;
}
DEBUG_LOG(HLE,"sceKernelCreateMutex(%s, %08x, %d, %08x)", name, attr, initialCount, optionsPtr);
Mutex *mutex = new Mutex();
SceUID id = kernelObjects.Create(mutex);
mutex->nm.size = sizeof(mutex);
strncpy(mutex->nm.name, name, 31);
mutex->nm.name[31] = 0;
mutex->nm.attr = attr;
if (initialCount == 0)
{
mutex->nm.lockLevel = 0;
mutex->nm.lockThread = -1;
}
else
__KernelMutexAcquireLock(mutex, initialCount);
if (optionsPtr != 0)
WARN_LOG(HLE,"sceKernelCreateMutex(%s) unsupported options parameter.", name);
RETURN(id);
__KernelReSchedule("mutex created");
}
void sceKernelUnlockLwMutex()
// Deletes a mutex, waking every waiter with SCE_KERNEL_ERROR_WAIT_DELETE and
// refunding any unused timeout time into the waiter's timeout pointer.
// (Cleaned: a stray leftover line from the deleted sceKernelUnlockLwMutex stub
// had been interleaved into this body.)
void sceKernelDeleteMutex(SceUID id)
{
	DEBUG_LOG(HLE,"sceKernelDeleteMutex(%i)", id);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
	if (mutex)
	{
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
		{
			SceUID threadID = *iter;

			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
			if (timeoutPtr != 0 && mutexWaitTimer != 0)
			{
				// Remove any event for this thread.
				int cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
				Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr);
			}

			__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		}
		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		// Fix: was waitingThreads.empty(), a no-op const query — clear() actually
		// empties the list.
		mutex->waitingThreads.clear();

		RETURN(kernelObjects.Destroy<Mutex>(id));
		__KernelReSchedule("mutex deleted");
	}
	else
		RETURN(error);
}
// Tries to take `mutex` for the current thread.
// Returns true if acquired (fresh lock or recursive re-lock).
// Returns false with `error` set on a validation failure, or false with
// `error` still 0 when the caller must block and wait.
bool __KernelLockMutex(Mutex *mutex, int count, u32 &error)
{
if (!error)
{
if (count <= 0)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
else if (count > 1 && !(mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE))
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
// Two positive ints will always overflow to negative.
else if (count + mutex->nm.lockLevel < 0)
error = PSP_MUTEX_ERROR_LOCK_OVERFLOW;
}
if (error)
return false;
if (mutex->nm.lockLevel == 0)
{
__KernelMutexAcquireLock(mutex, count);
// Nobody had it locked - no need to block
return true;
}
if (mutex->nm.lockThread == __KernelGetCurThread())
{
// Recursive mutex, let's just increase the lock count and keep going
if (mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE)
{
mutex->nm.lockLevel += count;
return true;
}
else
{
error = PSP_MUTEX_ERROR_ALREADY_LOCKED;
return false;
}
}
// Held by another thread: caller must block (error stays 0).
return false;
}
// Releases `mutex` and, if anyone is waiting, hands the lock to the first
// waiter (FIFO order; priority attr not yet honored) and wakes it.
// Returns true if a thread was woken.
bool __KernelUnlockMutex(Mutex *mutex, u32 &error)
{
__KernelMutexEraseLock(mutex);
// TODO: PSP_MUTEX_ATTR_PRIORITY
bool wokeThreads = false;
std::vector<SceUID>::iterator iter, end;
for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
{
SceUID threadID = *iter;
// The wait value is the lock count the waiter originally requested.
int wVal = (int)__KernelGetWaitValue(threadID, error);
u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
__KernelMutexAcquireLock(mutex, wVal, threadID);
if (timeoutPtr != 0 && mutexWaitTimer != 0)
{
// Remove any event for this thread.
int cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr);
}
__KernelResumeThreadFromWait(threadID, 0);
wokeThreads = true;
// Only the first waiter gets the lock; erase it and stop.
mutex->waitingThreads.erase(iter);
break;
}
if (!wokeThreads)
mutex->nm.lockThread = -1;
return wokeThreads;
}
// CoreTiming callback fired when a mutex wait times out.
// userdata carries the waiting thread's ID.
void __KernelMutexTimeout(u64 userdata, int cyclesLate)
{
SceUID threadID = (SceUID)userdata;
u32 error;
u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
// Report zero time remaining back to the guest.
if (timeoutPtr != 0)
Memory::Write_U32(0, timeoutPtr);
SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
Mutex *mutex = kernelObjects.Get<Mutex>(mutexID, error);
if (mutex)
{
// This thread isn't waiting anymore.
mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());
}
__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
// Thread-end hook: removes the dying thread from any mutex wait list and
// releases the mutex it held (tracked via mutexHeldLocks).
void __KernelMutexThreadEnd(SceUID threadID)
{
u32 error;
// If it was waiting on the mutex, it should finish now.
SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
if (mutexID)
{
Mutex *mutex = kernelObjects.Get<Mutex>(mutexID, error);
if (mutex)
mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());
}
// Release the lock this thread held, handing it to the next waiter if any.
std::map<SceUID, SceUID>::iterator iter = mutexHeldLocks.find(threadID);
if (iter != mutexHeldLocks.end())
{
SceUID mutexID = (*iter).second;
Mutex *mutex = kernelObjects.Get<Mutex>(mutexID, error);
if (mutex)
__KernelUnlockMutex(mutex, error);
}
}
// Arms the mutex timeout for the current thread, when a timeout was requested
// and the timer event has been registered.
void __KernelWaitMutex(Mutex *mutex, u32 timeoutPtr)
{
	if (timeoutPtr != 0 && mutexWaitTimer != 0)
	{
		// __KernelMutexTimeout() fires later unless the event gets unscheduled.
		int micro = (int) Memory::Read_U32(timeoutPtr);
		CoreTiming::ScheduleEvent(usToCycles(micro), mutexWaitTimer, __KernelGetCurThread());
	}
}
// int sceKernelLockMutex(SceUID id, int count, int *timeout)
// void because it changes threads.
// Acquires immediately when possible; otherwise queues the current thread as a
// waiter and blocks it (no callbacks).
void sceKernelLockMutex(SceUID id, int count, u32 timeoutPtr)
{
DEBUG_LOG(HLE,"sceKernelLockMutex(%i, %i, %08x)", id, count, timeoutPtr);
u32 error;
Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
if (__KernelLockMutex(mutex, count, error))
{
RETURN(0);
__KernelReSchedule("mutex locked");
}
else if (error)
RETURN(error);
else
{
// Contended: record the waiter, arm the timeout, and block.
mutex->waitingThreads.push_back(__KernelGetCurThread());
__KernelWaitMutex(mutex, timeoutPtr);
__KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, false);
}
}
// int sceKernelLockMutexCB(SceUID id, int count, int *timeout)
// void because it changes threads.
// Callback-aware variant: the wait is interruptible by callbacks.
void sceKernelLockMutexCB(SceUID id, int count, u32 timeoutPtr)
{
DEBUG_LOG(HLE,"sceKernelLockMutexCB(%i, %i, %08x)", id, count, timeoutPtr);
u32 error;
Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
if (__KernelLockMutex(mutex, count, error))
{
RETURN(0);
__KernelReSchedule("mutex locked");
}
else if (error)
RETURN(error);
else
{
mutex->waitingThreads.push_back(__KernelGetCurThread());
__KernelWaitMutex(mutex, timeoutPtr);
__KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, true);
__KernelCheckCallbacks();
}
// NOTE(review): this runs after every path above, so the success branch
// reschedules twice; sceKernelLockMutex has no trailing call — confirm intended.
__KernelReSchedule("mutex locked");
}
// int sceKernelTryLockMutex(SceUID id, int count)
// void because it changes threads.
// Non-blocking lock: fails with TRYLOCK_FAILED instead of waiting.
void sceKernelTryLockMutex(SceUID id, int count)
{
DEBUG_LOG(HLE,"sceKernelTryLockMutex(%i, %i)", id, count);
u32 error;
Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
if (__KernelLockMutex(mutex, count, error))
{
RETURN(0);
__KernelReSchedule("mutex trylocked");
}
else if (error)
RETURN(error);
else
RETURN(PSP_MUTEX_ERROR_TRYLOCK_FAILED);
}
// int sceKernelUnlockMutex(SceUID id, int count)
// void because it changes threads.
// Validates ownership and count, then drops the lock level; when it reaches 0
// the lock is handed to the next waiter and the scheduler runs.
void sceKernelUnlockMutex(SceUID id, int count)
{
DEBUG_LOG(HLE,"sceKernelUnlockMutex(%i, %i)", id, count);
u32 error;
Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
if (!error)
{
if (count <= 0)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
else if ((mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
// Only the holding thread may unlock.
else if (mutex->nm.lockLevel == 0 || mutex->nm.lockThread != __KernelGetCurThread())
error = PSP_MUTEX_ERROR_NOT_LOCKED;
else if (mutex->nm.lockLevel < count)
error = PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW;
}
if (error)
{
RETURN(error);
return;
}
mutex->nm.lockLevel -= count;
RETURN(0);
if (mutex->nm.lockLevel == 0)
{
__KernelUnlockMutex(mutex, error);
__KernelReSchedule("mutex unlocked");
}
}
// Creates an lwmutex: a kernel object plus a guest-memory workarea at
// workareaPtr, which holds the authoritative lock state.
void sceKernelCreateLwMutex(u32 workareaPtr, const char *name, u32 attr, int initialCount, u32 optionsPtr)
{
if (!mutexInitComplete)
__KernelMutexInit();
DEBUG_LOG(HLE,"sceKernelCreateLwMutex(%08x, %s, %08x, %d, %08x)", workareaPtr, name, attr, initialCount, optionsPtr);
u32 error = 0;
if (!name)
error = SCE_KERNEL_ERROR_ERROR;
else if (initialCount < 0)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
// A non-recursive lwmutex cannot start with a count above 1.
else if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
if (error)
{
RETURN(error);
return;
}
LwMutex *mutex = new LwMutex();
SceUID id = kernelObjects.Create(mutex);
// NOTE(review): sizeof(mutex) is the pointer size, not the native struct size — confirm intent.
mutex->nm.size = sizeof(mutex);
strncpy(mutex->nm.name, name, 31);
mutex->nm.name[31] = 0;
mutex->nm.attr = attr;
mutex->nm.workareaPtr = workareaPtr;
// Initialize the guest-visible workarea; lockThread 0 means unlocked.
NativeLwMutexWorkarea workarea;
workarea.init();
workarea.lockLevel = initialCount;
if (initialCount == 0)
workarea.lockThread = 0;
else
workarea.lockThread = __KernelGetCurThread();
workarea.attr = attr;
workarea.uid = id;
Memory::WriteStruct(workareaPtr, &workarea);
if (optionsPtr != 0)
WARN_LOG(HLE,"sceKernelCreateLwMutex(%s) unsupported options parameter.", name);
RETURN(0);
__KernelReSchedule("lwmutex created");
}
// Deletes an lwmutex: wakes all waiters with SCE_KERNEL_ERROR_WAIT_DELETE
// (refunding unused timeout time), destroys the kernel object, and clears the
// guest workarea so further use reports NO_SUCH_LWMUTEX.
void sceKernelDeleteLwMutex(u32 workareaPtr)
{
	DEBUG_LOG(HLE,"sceKernelDeleteLwMutex(%08x)", workareaPtr);

	if (!workareaPtr || !Memory::IsValidAddress(workareaPtr))
	{
		RETURN(SCE_KERNEL_ERROR_ILLEGAL_ADDR);
		return;
	}

	NativeLwMutexWorkarea workarea;
	Memory::ReadStruct(workareaPtr, &workarea);

	u32 error;
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
	if (mutex)
	{
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
		{
			SceUID threadID = *iter;

			u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
			if (timeoutPtr != 0 && lwMutexWaitTimer != 0)
			{
				// Remove any event for this thread.
				int cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
				Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr);
			}

			__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		}
		// Fix: was waitingThreads.empty(), a no-op const query — clear() actually
		// empties the list.
		mutex->waitingThreads.clear();

		RETURN(kernelObjects.Destroy<LwMutex>(workarea.uid));
		workarea.clear();
		Memory::WriteStruct(workareaPtr, &workarea);
		__KernelReSchedule("mutex deleted");
	}
	else
		RETURN(error);
}
// Tries to take the lwmutex described by `workarea` for the current thread.
// Returns true if acquired; false with `error` set on validation failure, or
// false with `error` still 0 when the caller must block. Mutates only the
// in-memory copy — the caller writes it back to guest memory.
bool __KernelLockLwMutex(NativeLwMutexWorkarea &workarea, int count, u32 &error)
{
if (!error)
{
if (count <= 0)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
else if (count > 1 && !(workarea.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE))
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
// Two positive ints will always overflow to negative.
else if (count + workarea.lockLevel < 0)
error = PSP_LWMUTEX_ERROR_LOCK_OVERFLOW;
// uid == -1 marks a cleared/deleted workarea.
else if (workarea.uid == -1)
error = PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX;
}
if (error)
return false;
if (workarea.lockLevel == 0)
{
if (workarea.lockThread != 0)
{
// Validate that it actually exists so we can return an error if not.
kernelObjects.Get<LwMutex>(workarea.uid, error);
if (error)
return false;
}
workarea.lockLevel = count;
workarea.lockThread = __KernelGetCurThread();
return true;
}
if (workarea.lockThread == __KernelGetCurThread())
{
// Recursive mutex, let's just increase the lock count and keep going
if (workarea.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE)
{
workarea.lockLevel += count;
return true;
}
else
{
error = PSP_LWMUTEX_ERROR_ALREADY_LOCKED;
return false;
}
}
// Held by another thread: caller must block (error stays 0).
return false;
}
// Releases the lwmutex and hands it to the first waiter (FIFO; priority attr
// not yet honored), waking that thread. Returns true if a thread was woken.
bool __KernelUnlockLwMutex(NativeLwMutexWorkarea &workarea, u32 &error)
{
LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
if (error)
{
workarea.lockThread = 0;
return false;
}
// TODO: PSP_MUTEX_ATTR_PRIORITY
bool wokeThreads = false;
std::vector<SceUID>::iterator iter, end;
for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
{
SceUID threadID = *iter;
// The wait value is the lock count the waiter originally requested.
int wVal = (int)__KernelGetWaitValue(threadID, error);
u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
workarea.lockLevel = wVal;
workarea.lockThread = threadID;
if (timeoutPtr != 0 && lwMutexWaitTimer != 0)
{
// Remove any event for this thread.
int cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr);
}
__KernelResumeThreadFromWait(threadID, 0);
wokeThreads = true;
// Only the first waiter gets the lock; erase it and stop.
mutex->waitingThreads.erase(iter);
break;
}
if (!wokeThreads)
workarea.lockThread = 0;
return wokeThreads;
}
// CoreTiming callback fired when an lwmutex wait times out.
// userdata carries the waiting thread's ID.
void __KernelLwMutexTimeout(u64 userdata, int cyclesLate)
{
SceUID threadID = (SceUID)userdata;
u32 error;
u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
// Report zero time remaining back to the guest.
if (timeoutPtr != 0)
Memory::Write_U32(0, timeoutPtr);
SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
LwMutex *mutex = kernelObjects.Get<LwMutex>(mutexID, error);
if (mutex)
{
// This thread isn't waiting anymore.
mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());
}
__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
// Arms the lwmutex timeout for the current thread, when a timeout was
// requested and the timer event has been registered.
void __KernelWaitLwMutex(LwMutex *mutex, u32 timeoutPtr)
{
	if (timeoutPtr != 0 && lwMutexWaitTimer != 0)
	{
		// __KernelLwMutexTimeout() fires later unless the event gets unscheduled.
		int micro = (int) Memory::Read_U32(timeoutPtr);
		CoreTiming::ScheduleEvent(usToCycles(micro), lwMutexWaitTimer, __KernelGetCurThread());
	}
}
// Non-blocking lwmutex lock (pre-6.00 variant).
// On any failure this variant reports PSP_MUTEX_ERROR_TRYLOCK_FAILED — the
// original `else if (error)` / `else` branches returned the identical value,
// so they are collapsed (the _600 variant is the one that distinguishes).
void sceKernelTryLockLwMutex(u32 workareaPtr, int count)
{
	DEBUG_LOG(HLE,"sceKernelTryLockLwMutex(%08x, %i)", workareaPtr, count);

	NativeLwMutexWorkarea workarea;
	Memory::ReadStruct(workareaPtr, &workarea);

	u32 error = 0;
	if (__KernelLockLwMutex(workarea, count, error))
	{
		Memory::WriteStruct(workareaPtr, &workarea);
		RETURN(0);
		__KernelReSchedule("lwmutex trylocked");
	}
	else
		RETURN(PSP_MUTEX_ERROR_TRYLOCK_FAILED);
}
// Non-blocking lwmutex lock (6.00+ variant): unlike the older call, this one
// passes validation errors through and uses the LWMUTEX trylock error code.
void sceKernelTryLockLwMutex_600(u32 workareaPtr, int count)
{
DEBUG_LOG(HLE,"sceKernelTryLockLwMutex_600(%08x, %i)", workareaPtr, count);
NativeLwMutexWorkarea workarea;
Memory::ReadStruct(workareaPtr, &workarea);
u32 error = 0;
if (__KernelLockLwMutex(workarea, count, error))
{
// Persist the updated lock state back to guest memory.
Memory::WriteStruct(workareaPtr, &workarea);
RETURN(0);
__KernelReSchedule("lwmutex trylocked");
}
else if (error)
RETURN(error);
else
RETURN(PSP_LWMUTEX_ERROR_TRYLOCK_FAILED);
}
// Blocking lwmutex lock (no callbacks): acquires immediately when possible,
// otherwise queues the current thread on the backing kernel object and blocks.
void sceKernelLockLwMutex(u32 workareaPtr, int count, u32 timeoutPtr)
{
DEBUG_LOG(HLE,"sceKernelLockLwMutex(%08x, %i, %08x)", workareaPtr, count, timeoutPtr);
NativeLwMutexWorkarea workarea;
Memory::ReadStruct(workareaPtr, &workarea);
u32 error = 0;
if (__KernelLockLwMutex(workarea, count, error))
{
// Persist the updated lock state back to guest memory.
Memory::WriteStruct(workareaPtr, &workarea);
RETURN(0);
__KernelReSchedule("lwmutex locked");
}
else if (error)
RETURN(error);
else
{
LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
if (mutex)
{
// Contended: record the waiter, arm the timeout, and block.
mutex->waitingThreads.push_back(__KernelGetCurThread());
__KernelWaitLwMutex(mutex, timeoutPtr);
__KernelWaitCurThread(WAITTYPE_LWMUTEX, workarea.uid, count, timeoutPtr, false);
}
else
RETURN(error);
}
}
// Blocking lwmutex lock, callback-aware variant: the wait is interruptible by
// callbacks (__KernelWaitCurThread called with processCallbacks = true).
void sceKernelLockLwMutexCB(u32 workareaPtr, int count, u32 timeoutPtr)
{
DEBUG_LOG(HLE,"sceKernelLockLwMutexCB(%08x, %i, %08x)", workareaPtr, count, timeoutPtr);
NativeLwMutexWorkarea workarea;
Memory::ReadStruct(workareaPtr, &workarea);
u32 error = 0;
if (__KernelLockLwMutex(workarea, count, error))
{
// Persist the updated lock state back to guest memory.
Memory::WriteStruct(workareaPtr, &workarea);
RETURN(0);
__KernelReSchedule("lwmutex locked");
}
else if (error)
RETURN(error);
else
{
LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea.uid, error);
if (mutex)
{
mutex->waitingThreads.push_back(__KernelGetCurThread());
__KernelWaitLwMutex(mutex, timeoutPtr);
__KernelWaitCurThread(WAITTYPE_LWMUTEX, workarea.uid, count, timeoutPtr, true);
__KernelCheckCallbacks();
}
else
RETURN(error);
}
}
// Unlocks an lwmutex by `count` levels after validating ownership and count.
// When the level reaches 0 the lock is handed to the next waiter and the
// scheduler runs; the workarea is written back in every success path.
void sceKernelUnlockLwMutex(u32 workareaPtr, int count)
{
DEBUG_LOG(HLE,"sceKernelUnlockLwMutex(%08x, %i)", workareaPtr, count);
NativeLwMutexWorkarea workarea;
Memory::ReadStruct(workareaPtr, &workarea);
u32 error = 0;
// uid == -1 marks a cleared/deleted workarea.
if (workarea.uid == -1)
error = PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX;
else if (count <= 0)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
else if ((workarea.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1)
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
// Only the holding thread may unlock.
else if (workarea.lockLevel == 0 || workarea.lockThread != __KernelGetCurThread())
error = PSP_LWMUTEX_ERROR_NOT_LOCKED;
else if (workarea.lockLevel < count)
error = PSP_LWMUTEX_ERROR_UNLOCK_UNDERFLOW;
if (error)
{
RETURN(error);
return;
}
workarea.lockLevel -= count;
RETURN(0);
if (workarea.lockLevel == 0)
{
__KernelUnlockLwMutex(workarea, error);
Memory::WriteStruct(workareaPtr, &workarea);
__KernelReSchedule("mutex unlocked");
}
else
Memory::WriteStruct(workareaPtr, &workarea);
}

View File

@ -17,16 +17,21 @@
#pragma once
// TODO
u32 sceKernelCreateMutex(const char *name, u32 attr, u32 options);
u32 sceKernelDeleteMutex(u32 id);
u32 sceKernelLockMutex(u32 id, u32 count, u32 timeoutPtr);
u32 sceKernelLockMutexCB(u32 id, u32 count, u32 timeoutPtr);
u32 sceKernelUnlockMutex(u32 id, u32 count);
void sceKernelCreateMutex(const char *name, u32 attr, int initialCount, u32 optionsPtr);
void sceKernelDeleteMutex(SceUID id);
void sceKernelLockMutex(SceUID id, int count, u32 timeoutPtr);
void sceKernelLockMutexCB(SceUID id, int count, u32 timeoutPtr);
void sceKernelTryLockMutex(SceUID id, int count);
void sceKernelUnlockMutex(SceUID id, int count);
void sceKernelCreateLwMutex();
void sceKernelDeleteLwMutex();
void sceKernelTryLockLwMutex();
void sceKernelLockLwMutex();
void sceKernelLockLwMutexCB();
void sceKernelUnlockLwMutex();
void sceKernelCreateLwMutex(u32 workareaPtr, const char *name, u32 attr, int initialCount, u32 optionsPtr);
void sceKernelDeleteLwMutex(u32 workareaPtr);
void sceKernelTryLockLwMutex(u32 workareaPtr, int count);
void sceKernelTryLockLwMutex_600(u32 workareaPtr, int count);
void sceKernelLockLwMutex(u32 workareaPtr, int count, u32 timeoutPtr);
void sceKernelLockLwMutexCB(u32 workareaPtr, int count, u32 timeoutPtr);
void sceKernelUnlockLwMutex(u32 workareaPtr, int count);
void __KernelMutexTimeout(u64 userdata, int cyclesLate);
void __KernelLwMutexTimeout(u64 userdata, int cyclesLate);
void __KernelMutexThreadEnd(SceUID thread);

View File

@ -15,9 +15,10 @@
// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
#include <algorithm>
#include "HLE.h"
#include "../MIPS/MIPS.h"
#include "../../Core/CoreTiming.h"
#include "sceKernel.h"
#include "sceKernelThread.h"
#include "sceKernelSemaphore.h"
@ -60,6 +61,15 @@ struct Semaphore : public KernelObject
std::vector<SceUID> waitingThreads;
};
// Set once __KernelSemaInit() has run (lazily triggered from sceKernelCreateSema).
bool semaInitComplete = false;
// CoreTiming event type ID for semaphore wait timeouts; 0 until registered.
int semaWaitTimer = 0;
// One-time setup: registers the semaphore timeout event.
void __KernelSemaInit()
{
semaWaitTimer = CoreTiming::RegisterEvent("SemaphoreTimeout", &__KernelSemaTimeout);
semaInitComplete = true;
}
// Resume all waiting threads (for delete / cancel.)
// Returns true if it woke any threads.
bool __KernelClearSemaThreads(Semaphore *s, int reason)
@ -72,8 +82,16 @@ bool __KernelClearSemaThreads(Semaphore *s, int reason)
{
SceUID threadID = *iter;
// TODO: Set returnValue = reason?
__KernelResumeThreadFromWait(threadID);
u32 error;
u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
if (timeoutPtr != 0 && semaWaitTimer != 0)
{
// Remove any event for this thread.
int cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID);
Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr);
}
__KernelResumeThreadFromWait(threadID, reason);
wokeThreads = true;
}
s->waitingThreads.empty();
@ -103,7 +121,7 @@ void sceKernelCancelSema(SceUID id, int newCount, u32 numWaitThreadsPtr)
*numWaitThreads = s->ns.numWaitThreads;
}
if (newCount == -1)
if (newCount < 0)
s->ns.currentCount = s->ns.initCount;
else
s->ns.currentCount = newCount;
@ -126,6 +144,9 @@ void sceKernelCancelSema(SceUID id, int newCount, u32 numWaitThreadsPtr)
// void because it changes threads.
void sceKernelCreateSema(const char* name, u32 attr, int initVal, int maxVal, u32 optionPtr)
{
if (!semaInitComplete)
__KernelSemaInit();
if (!name)
{
RETURN(SCE_KERNEL_ERROR_ERROR);
@ -146,6 +167,9 @@ void sceKernelCreateSema(const char* name, u32 attr, int initVal, int maxVal, u3
DEBUG_LOG(HLE,"%i=sceKernelCreateSema(%s, %08x, %i, %i, %08x)", id, s->ns.name, s->ns.attr, s->ns.initCount, s->ns.maxCount, optionPtr);
if (optionPtr != 0)
WARN_LOG(HLE,"sceKernelCreateSema(%s) unsupported options parameter.", name);
RETURN(id);
__KernelReSchedule("semaphore created");
@ -221,13 +245,23 @@ retry:
for (iter = s->waitingThreads.begin(); iter!=s->waitingThreads.end(); iter++)
{
SceUID threadID = *iter;
int wVal = (int)__KernelGetWaitValue(threadID, error);
u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
if (wVal <= s->ns.currentCount)
{
s->ns.currentCount -= wVal;
s->ns.numWaitThreads--;
__KernelResumeThreadFromWait(threadID);
if (timeoutPtr != 0 && semaWaitTimer != 0)
{
// Remove any event for this thread.
int cyclesLeft = CoreTiming::UnscheduleEvent(semaWaitTimer, threadID);
Memory::Write_U32(cyclesToUs(cyclesLeft), timeoutPtr);
}
__KernelResumeThreadFromWait(threadID, 0);
wokeThreads = true;
s->waitingThreads.erase(iter);
goto retry;
@ -247,12 +281,49 @@ retry:
}
}
// CoreTiming callback: a thread's sceKernelWaitSema timeout expired.
// userdata carries the waiting thread's ID.  Removes the thread from the
// semaphore's wait list and wakes it with SCE_KERNEL_ERROR_WAIT_TIMEOUT.
void __KernelSemaTimeout(u64 userdata, int cycleslate)
{
	u32 error;
	const SceUID threadID = (SceUID)userdata;

	// The entire timeout elapsed, so report zero microseconds remaining.
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	// If the thread is still registered as waiting on a semaphore, detach it.
	SceUID semaID = __KernelGetWaitID(threadID, WAITTYPE_SEMA, error);
	Semaphore *s = kernelObjects.Get<Semaphore>(semaID, error);
	if (s != 0)
	{
		// This thread isn't waiting anymore.
		std::vector<SceUID> &waiting = s->waitingThreads;
		waiting.erase(std::remove(waiting.begin(), waiting.end(), threadID), waiting.end());
		s->ns.numWaitThreads--;
	}

	__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
}
// Schedules the timeout event for the current thread's semaphore wait.
// No-op when the caller supplied no timeout pointer or the timer event
// was never registered.
void __KernelSetSemaTimeout(Semaphore *s, u32 timeoutPtr)
{
if (timeoutPtr == 0 || semaWaitTimer == 0)
return;
// This should call __KernelSemaTimeout() later, unless we cancel it.
// timeoutPtr points to the wait duration in microseconds (guest memory.)
int micro = (int) Memory::Read_U32(timeoutPtr);
CoreTiming::ScheduleEvent(usToCycles(micro), semaWaitTimer, __KernelGetCurThread());
}
void __KernelWaitSema(SceUID id, int wantedCount, u32 timeoutPtr, const char *badSemaMessage, bool processCallbacks)
{
u32 error;
Semaphore *s = kernelObjects.Get<Semaphore>(id, error);
if (s)
{
if (wantedCount > s->ns.maxCount || wantedCount <= 0)
{
RETURN(SCE_KERNEL_ERROR_ILLEGAL_COUNT);
return;
}
// We need to set the return value BEFORE processing callbacks / etc.
RETURN(0);
@ -262,8 +333,8 @@ void __KernelWaitSema(SceUID id, int wantedCount, u32 timeoutPtr, const char *ba
{
s->ns.numWaitThreads++;
s->waitingThreads.push_back(__KernelGetCurThread());
// TODO: timeoutPtr?
__KernelWaitCurThread(WAITTYPE_SEMA, id, wantedCount, 0, processCallbacks);
__KernelSetSemaTimeout(s, timeoutPtr);
__KernelWaitCurThread(WAITTYPE_SEMA, id, wantedCount, timeoutPtr, processCallbacks);
if (processCallbacks)
__KernelCheckCallbacks();
}

View File

@ -25,3 +25,5 @@ void sceKernelReferSemaStatus(SceUID id, u32 infoPtr);
void sceKernelSignalSema(SceUID id, int signal);
void sceKernelWaitSema(SceUID semaid, int signal, u32 timeoutPtr);
void sceKernelWaitSemaCB(SceUID semaid, int signal, u32 timeoutPtr);
void __KernelSemaTimeout(u64 userdata, int cycleslate);

View File

@ -72,6 +72,7 @@ const char *waitTypeStrings[] =
"Umd",
"Vblank",
"Mutex",
"LwMutex",
};
struct SceKernelSysClock {
@ -159,6 +160,7 @@ struct NativeThread
struct ThreadWaitInfo {
u32 waitValue;
u32 timeoutPtr;
};
class Thread : public KernelObject
@ -272,6 +274,7 @@ u32 threadReturnHackAddr;
u32 cbReturnHackAddr;
u32 intReturnHackAddr;
std::vector<Thread *> threadqueue; //Change to SceUID
std::vector<ThreadCallback> threadEndListeners;
SceUID threadIdleID[2];
@ -342,6 +345,21 @@ void __KernelThreadingInit()
// These idle threads are later started in LoadExec, which calls __KernelStartIdleThreads below.
}
// Registers a callback that __KernelFireThreadEnd invokes whenever a thread
// ends (returns, exits, or is deleted.)  There is no way to unregister.
void __KernelListenThreadEnd(ThreadCallback callback)
{
threadEndListeners.push_back(callback);
}
// Notifies every listener registered via __KernelListenThreadEnd that the
// given thread has ended, passing the thread's UID.
void __KernelFireThreadEnd(Thread *thread)
{
	const SceUID endedThreadID = thread->GetUID();
	for (size_t i = 0; i < threadEndListeners.size(); ++i)
		threadEndListeners[i](endedThreadID);
}
void __KernelStartIdleThreads()
{
for (int i = 0; i < 2; i++)
@ -392,6 +410,37 @@ u32 __KernelGetWaitValue(SceUID threadID, u32 &error)
}
}
// Returns the guest address where the thread's wait timeout (in microseconds)
// is stored, or 0 for an invalid thread.  error receives the lookup status.
u32 __KernelGetWaitTimeoutPtr(SceUID threadID, u32 &error)
{
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(HLE, "__KernelGetWaitTimeoutPtr ERROR: thread %i", threadID);
		return 0;
	}
	return t->waitInfo.timeoutPtr;
}
// Returns the ID of the object the thread is waiting on, but only when its
// wait is of the requested type; otherwise 0.  error receives the lookup
// status for the thread object itself.
SceUID __KernelGetWaitID(SceUID threadID, WaitType type, u32 &error)
{
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(HLE, "__KernelGetWaitID ERROR: thread %i", threadID);
		return 0;
	}
	return (t->nt.waitType == type) ? t->nt.waitID : 0;
}
void sceKernelReferThreadStatus()
{
SceUID threadID = PARAM(0);
@ -512,6 +561,50 @@ void __KernelLoadContext(ThreadContext *ctx)
// currentMIPS->fcr31 = ctx->fcr31;
}
// Core wakeup: clears the thread's wait state in place.
// Does NOT reschedule; callers decide when to actually switch threads.
void __KernelResumeThreadFromWait(Thread *t)
{
t->nt.status &= ~THREADSTATUS_WAIT;
// TODO: What if DORMANT or DEAD?
// NOTE(review): WAIT was just cleared, so this effectively tests the suspend
// bit — a suspended thread stays suspended instead of becoming ready.
// Confirm THREADSTATUS_WAITSUSPEND is (WAIT | SUSPEND).
if (!(t->nt.status & THREADSTATUS_WAITSUSPEND))
t->nt.status = THREADSTATUS_READY;
// Non-waiting threads do not process callbacks.
t->isProcessingCallbacks = false;
}
// Wakes the thread with the given ID without setting a return value.
// Returns 0 on success, or the kernel object lookup error for a bad ID.
u32 __KernelResumeThreadFromWait(SceUID threadID)
{
	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(HLE, "__KernelResumeThreadFromWait(%d): bad thread: %08x", threadID, error);
		return error;
	}
	__KernelResumeThreadFromWait(t);
	return 0;
}
// Wakes the thread with the given ID and sets retval as the value its wait
// call returns (e.g. 0 on success or SCE_KERNEL_ERROR_WAIT_TIMEOUT.)
// Returns 0 on success, or the kernel object lookup error for a bad ID.
u32 __KernelResumeThreadFromWait(SceUID threadID, int retval)
{
	u32 error;
	Thread *t = kernelObjects.Get<Thread>(threadID, error);
	if (!t)
	{
		ERROR_LOG(HLE, "__KernelResumeThreadFromWait(%d): bad thread: %08x", threadID, error);
		return error;
	}
	__KernelResumeThreadFromWait(t);
	t->setReturnValue(retval);
	return 0;
}
// DANGEROUS
// Only run when you can safely accept a context switch
// Triggers a waitable event, that is, it wakes up all threads that waits for it
@ -527,69 +620,41 @@ bool __KernelTriggerWait(WaitType type, int id, bool dontSwitch)
{
if (t->nt.waitType == type && t->nt.waitID == id)
{
// This threads is waiting for the triggered object
t->nt.status &= ~THREADSTATUS_WAIT;
if (t->nt.status == 0)
{
t->nt.status = THREADSTATUS_READY;
}
// Non-waiting threads do not process callbacks.
t->isProcessingCallbacks = false;
// This thread was waiting for the triggered object.
__KernelResumeThreadFromWait(t);
doneAnything = true;
}
}
}
// if (doneAnything) // lumines?
{
if (!dontSwitch)
{
// TODO: time waster
char temp[256];
sprintf(temp, "resumed from wait %s", waitTypeStrings[(int)type]);
__KernelReSchedule(temp);
}
}
// if (doneAnything) // lumines?
{
if (!dontSwitch)
{
// TODO: time waster
char temp[256];
sprintf(temp, "resumed from wait %s", waitTypeStrings[(int)type]);
__KernelReSchedule(temp);
}
}
return true;
}
u32 __KernelResumeThreadFromWait(SceUID threadID)
{
u32 error;
Thread *t = kernelObjects.Get<Thread>(threadID, error);
if (t)
{
t->nt.status &= ~THREADSTATUS_WAIT;
if (!(t->nt.status & (THREADSTATUS_SUSPEND | THREADSTATUS_WAIT)))
t->nt.status |= THREADSTATUS_READY;
t->isProcessingCallbacks = false;
return 0;
}
else
{
ERROR_LOG(HLE, "__KernelResumeThreadFromWait(%d): bad thread: %08x", threadID, error);
return error;
}
}
// makes the current thread wait for an event
void __KernelWaitCurThread(WaitType type, SceUID waitID, u32 waitValue, int timeout, bool processCallbacks)
void __KernelWaitCurThread(WaitType type, SceUID waitID, u32 waitValue, u32 timeoutPtr, bool processCallbacks)
{
currentThread->nt.waitID = waitID;
currentThread->nt.waitType = type;
__KernelChangeThreadState(currentThread, THREADSTATUS_WAIT);
currentThread->nt.numReleases++;
currentThread->waitInfo.waitValue = waitValue;
if (timeout)
{
// TODO:
}
currentThread->waitInfo.timeoutPtr = timeoutPtr;
RETURN(0); //pretend all went OK
// TODO: time waster
char temp[256];
sprintf(temp, "started wait %s", waitTypeStrings[(int)type]);
// TODO: time waster
char temp[256];
sprintf(temp, "started wait %s", waitTypeStrings[(int)type]);
__KernelReSchedule(processCallbacks, temp);
// TODO: Remove thread from Ready queue?
@ -829,12 +894,10 @@ void sceKernelCreateThread()
}
u32 sceKernelStartThread()
// int sceKernelStartThread(SceUID threadToStartID, SceSize argSize, void *argBlock)
// void because it reschedules.
void sceKernelStartThread(SceUID threadToStartID, u32 argSize, u32 argBlockPtr)
{
int threadToStartID = PARAM(0);
u32 argSize = PARAM(1);
u32 argBlockPtr = PARAM(2);
if (threadToStartID != currentThread->GetUID())
{
u32 error;
@ -843,13 +906,15 @@ u32 sceKernelStartThread()
{
ERROR_LOG(HLE,"%08x=sceKernelStartThread(thread=%i, argSize=%i, argPtr= %08x): thread does not exist!",
error,threadToStartID,argSize,argBlockPtr)
return error;
RETURN(error);
return;
}
if (startThread->nt.status != THREADSTATUS_DORMANT)
{
//Not dormant, WTF?
return ERROR_KERNEL_THREAD_IS_NOT_DORMANT;
RETURN(ERROR_KERNEL_THREAD_IS_NOT_DORMANT);
return;
}
INFO_LOG(HLE,"sceKernelStartThread(thread=%i, argSize=%i, argPtr= %08x )",
@ -876,12 +941,14 @@ u32 sceKernelStartThread()
if (!argBlockPtr && argSize > 0) {
WARN_LOG(HLE,"sceKernelStartThread : had NULL arg");
}
return 0;
RETURN(0);
__KernelReSchedule("thread started");
}
else
{
ERROR_LOG(HLE,"thread %i trying to start itself", threadToStartID);
return -1;
RETURN(-1);
}
}
@ -929,6 +996,7 @@ void __KernelReturnFromThread()
currentThread->nt.exitStatus = currentThread->context.r[2];
currentThread->nt.status = THREADSTATUS_DORMANT;
__KernelFireThreadEnd(currentThread);
// TODO: Need to remove the thread from any ready queues.
@ -943,8 +1011,10 @@ void __KernelReturnFromThread()
void sceKernelExitThread()
{
ERROR_LOG(HLE,"sceKernelExitThread FAKED");
currentThread->nt.status = THREADSTATUS_DORMANT;
currentThread->nt.exitStatus = PARAM(0);
currentThread->nt.status = THREADSTATUS_DORMANT;
currentThread->nt.exitStatus = PARAM(0);
__KernelFireThreadEnd(currentThread);
//Find threads that waited for me
// Wake them
if (!__KernelTriggerWait(WAITTYPE_THREADEND, __KernelGetCurThread()))
@ -958,6 +1028,8 @@ void _sceKernelExitThread()
ERROR_LOG(HLE,"_sceKernelExitThread FAKED");
currentThread->nt.status = THREADSTATUS_DORMANT;
currentThread->nt.exitStatus = PARAM(0);
__KernelFireThreadEnd(currentThread);
//Find threads that waited for this one
// Wake them
if (!__KernelTriggerWait(WAITTYPE_THREADEND, __KernelGetCurThread()))
@ -976,6 +1048,7 @@ void sceKernelExitDeleteThread()
ERROR_LOG(HLE,"sceKernelExitDeleteThread()");
currentThread->nt.status = THREADSTATUS_DORMANT;
currentThread->nt.exitStatus = PARAM(0);
__KernelFireThreadEnd(currentThread);
//userMemory.Free(currentThread->stackBlock);
currentThread->stackBlock = -1;
@ -1028,6 +1101,7 @@ void sceKernelDeleteThread()
if (t)
{
__KernelRemoveFromThreadQueue(t);
__KernelFireThreadEnd(t);
RETURN(kernelObjects.Destroy<Thread>(threadHandle));

View File

@ -35,7 +35,7 @@ void sceKernelExitThread();
void _sceKernelExitThread();
void sceKernelGetThreadId();
void sceKernelGetThreadCurrentPriority();
u32 sceKernelStartThread();
void sceKernelStartThread(SceUID threadToStartID, u32 argSize, u32 argBlockPtr);
u32 sceKernelSuspendDispatchThread();
u32 sceKernelResumeDispatchThread(u32 suspended);
void sceKernelWaitThreadEnd();
@ -68,7 +68,9 @@ enum WaitType //probably not the real values
WAITTYPE_AUDIOCHANNEL = 10, // this is fake, should be replaced with 8 eventflags ( ?? )
WAITTYPE_UMD = 11, // this is fake, should be replaced with 1 eventflag ( ?? )
WAITTYPE_VBLANK = 12, // fake
WAITTYPE_MUTEX = 13,
WAITTYPE_MUTEX = 13,
WAITTYPE_LWMUTEX = 14,
// Remember to update sceKernelThread.cpp's waitTypeStrings to match.
};
@ -103,9 +105,12 @@ void __KernelLoadContext(ThreadContext *ctx);
// TODO: Replace this with __KernelResumeThread over time as it's misguided.
bool __KernelTriggerWait(WaitType type, int id, bool dontSwitch = false);
u32 __KernelResumeThreadFromWait(SceUID threadID); // can return an error value
u32 __KernelResumeThreadFromWait(SceUID threadID, int retval);
u32 __KernelGetWaitValue(SceUID threadID, u32 &error);
void __KernelWaitCurThread(WaitType type, SceUID waitId, u32 waitValue, int timeout, bool processCallbacks);
u32 __KernelGetWaitTimeoutPtr(SceUID threadID, u32 &error);
SceUID __KernelGetWaitID(SceUID threadID, WaitType type, u32 &error);
void __KernelWaitCurThread(WaitType type, SceUID waitId, u32 waitValue, u32 timeoutPtr, bool processCallbacks);
void __KernelReSchedule(const char *reason = "no reason");
void __KernelReSchedule(bool doCallbacks, const char *reason);
@ -193,3 +198,6 @@ enum ThreadStatus
};
void __KernelChangeThreadState(Thread *thread, ThreadStatus newStatus);
typedef void (*ThreadCallback)(SceUID threadID);
void __KernelListenThreadEnd(ThreadCallback callback);

12
test.py
View File

@ -54,9 +54,9 @@ tests_good = [
"string/string",
"gpu/callbacks/ge_callbacks",
"threads/mbx/mbx",
"threads/mutex/mutex",
"threads/mutex/delete/delete",
"threads/semaphores/semaphores",
"threads/semaphores/cancel/cancel",
"threads/semaphores/create/create",
"threads/semaphores/delete/delete",
"threads/semaphores/poll/poll",
"threads/semaphores/refer/refer",
@ -72,8 +72,14 @@ tests_next = [
# These are the next tests up for fixing. These run by default.
"threads/fpl/fpl",
"threads/msgpipe/msgpipe",
"threads/mutex/mutex",
"threads/mutex/create/create",
"threads/mutex/lock/lock",
"threads/mutex/priority/priority",
"threads/mutex/try/try",
"threads/mutex/unlock/unlock",
"threads/scheduling/scheduling",
"threads/semaphores/cancel/cancel",
"threads/semaphores/create/create",
"threads/semaphores/priority/priority",
"threads/semaphores/wait/wait",
"threads/threads/threads",