2012-11-01 15:19:01 +00:00
|
|
|
// Copyright (c) 2012- PPSSPP Project.
|
|
|
|
|
|
|
|
// This program is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU General Public License as published by
|
2012-11-04 22:01:49 +00:00
|
|
|
// the Free Software Foundation, version 2.0 or later versions.
|
2012-11-01 15:19:01 +00:00
|
|
|
|
|
|
|
// This program is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU General Public License 2.0 for more details.
|
|
|
|
|
|
|
|
// A copy of the GPL 2.0 should have been included with the program.
|
|
|
|
// If not, see http://www.gnu.org/licenses/
|
|
|
|
|
|
|
|
// Official git repository and contact information can be found at
|
|
|
|
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
|
|
|
|
|
2012-11-19 15:37:16 +00:00
|
|
|
#include <algorithm>
|
2012-11-20 08:18:11 +00:00
|
|
|
#include <map>
|
2012-11-01 15:19:01 +00:00
|
|
|
#include "HLE.h"
|
|
|
|
#include "../MIPS/MIPS.h"
|
2013-02-04 04:31:46 +00:00
|
|
|
#include "Core/CoreTiming.h"
|
2013-03-26 07:54:00 +00:00
|
|
|
#include "Core/Reporting.h"
|
2013-02-04 04:31:46 +00:00
|
|
|
#include "ChunkFile.h"
|
2012-11-01 15:19:01 +00:00
|
|
|
#include "sceKernel.h"
|
|
|
|
#include "sceKernelMutex.h"
|
|
|
|
#include "sceKernelThread.h"
|
|
|
|
|
|
|
|
// Mutex attribute flags (the attr argument of sceKernelCreateMutex).
#define PSP_MUTEX_ATTR_FIFO 0
#define PSP_MUTEX_ATTR_PRIORITY 0x100
#define PSP_MUTEX_ATTR_ALLOW_RECURSIVE 0x200
// The attribute bits this implementation actually handles; anything else warns.
#define PSP_MUTEX_ATTR_KNOWN (PSP_MUTEX_ATTR_PRIORITY | PSP_MUTEX_ATTR_ALLOW_RECURSIVE)

// Not sure about the names of these
#define PSP_MUTEX_ERROR_NO_SUCH_MUTEX 0x800201C3
#define PSP_MUTEX_ERROR_TRYLOCK_FAILED 0x800201C4
#define PSP_MUTEX_ERROR_NOT_LOCKED 0x800201C5
#define PSP_MUTEX_ERROR_LOCK_OVERFLOW 0x800201C6
#define PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW 0x800201C7
#define PSP_MUTEX_ERROR_ALREADY_LOCKED 0x800201C8

// Error codes for the "lightweight" mutex variant.
#define PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX 0x800201CA
// Note: used only for _600.
#define PSP_LWMUTEX_ERROR_TRYLOCK_FAILED 0x800201CB
#define PSP_LWMUTEX_ERROR_NOT_LOCKED 0x800201CC
#define PSP_LWMUTEX_ERROR_LOCK_OVERFLOW 0x800201CD
#define PSP_LWMUTEX_ERROR_UNLOCK_UNDERFLOW 0x800201CE
#define PSP_LWMUTEX_ERROR_ALREADY_LOCKED 0x800201CF
|
2012-11-01 15:19:01 +00:00
|
|
|
|
|
|
|
// Mutex status as reported to the guest; this exact layout is written back
// to PSP memory by sceKernelReferMutexStatus, so it must not change.
struct NativeMutex
{
	SceSize size;
	char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
	SceUInt attr;
	// Lock count the mutex was created with.
	int initialCount;
	// Current lock (recursion) count; 0 means unlocked.
	int lockLevel;
	// UID of the owning thread, or -1 when unlocked.
	SceUID lockThread;
	// Not kept up to date.
	int numWaitThreads;
};
|
|
|
|
|
|
|
|
// Host-side kernel object wrapping one PSP mutex, including its wait queue.
struct Mutex : public KernelObject
{
	const char *GetName() {return nm.name;}
	const char *GetTypeName() {return "Mutex";}
	static u32 GetMissingErrorCode() { return PSP_MUTEX_ERROR_NO_SUCH_MUTEX; }
	int GetIDType() const { return SCE_KERNEL_TMID_Mutex; }

	// Savestate serialization.  Field order must stay stable or old states break.
	virtual void DoState(PointerWrap &p)
	{
		p.Do(nm);
		SceUID dv = 0;
		p.Do(waitingThreads, dv);
		p.Do(pausedWaitTimeouts);
		p.DoMarker("Mutex");
	}

	NativeMutex nm;
	// Threads currently blocked trying to lock this mutex.
	std::vector<SceUID> waitingThreads;
	// Key is the callback id it was for, or if no callback, the thread id.
	std::map<SceUID, u64> pausedWaitTimeouts;
};
|
|
|
|
|
2013-02-25 06:50:48 +00:00
|
|
|
|
2012-11-19 09:01:19 +00:00
|
|
|
// Kernel-side status record for an LwMutex (distinct from the user-memory
// workarea below, which the guest manipulates directly).
struct NativeLwMutex
{
	SceSize size;
	char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
	SceUInt attr;
	SceUID uid;
	// Guest address of the associated NativeLwMutexWorkarea.
	u32 workareaPtr;
	int initialCount;
	// Not kept up to date.
	int currentCount;
	// Not kept up to date.
	SceUID lockThread;
	// Not kept up to date.
	int numWaitThreads;
};
|
|
|
|
|
|
|
|
// LwMutex workarea layout; presumably mirrors the structure living in PSP
// RAM at NativeLwMutex::workareaPtr (guest code touches it directly) — the
// layout must therefore never change.  TODO(review): confirm against callers.
struct NativeLwMutexWorkarea
{
	int lockLevel;
	SceUID lockThread;
	int attr;
	int numWaitThreads;
	SceUID uid;
	int pad[3];

	// Zero the entire structure (fresh creation).
	void init()
	{
		memset(this, 0, sizeof(NativeLwMutexWorkarea));
	}

	// Mark as unlocked/invalid without touching attr or the other fields.
	void clear()
	{
		lockLevel = 0;
		lockThread = -1;
		uid = -1;
	}
};
|
|
|
|
|
|
|
|
// Host-side kernel object for an LwMutex, including its wait queue.
struct LwMutex : public KernelObject
{
	const char *GetName() {return nm.name;}
	const char *GetTypeName() {return "LwMutex";}
	static u32 GetMissingErrorCode() { return PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX; }
	int GetIDType() const { return SCE_KERNEL_TMID_LwMutex; }

	// Savestate serialization.  Field order must stay stable or old states break.
	virtual void DoState(PointerWrap &p)
	{
		p.Do(nm);
		SceUID dv = 0;
		p.Do(waitingThreads, dv);
		p.Do(pausedWaitTimeouts);
		p.DoMarker("LwMutex");
	}

	NativeLwMutex nm;
	// Threads currently blocked trying to lock this lwmutex.
	std::vector<SceUID> waitingThreads;
	// Key is the callback id it was for, or if no callback, the thread id.
	std::map<SceUID, u64> pausedWaitTimeouts;
};
|
|
|
|
|
2013-01-18 06:04:52 +00:00
|
|
|
// CoreTiming event slots used for lock timeouts; -1 until registered in
// __KernelMutexInit().
static int mutexWaitTimer = -1;
static int lwMutexWaitTimer = -1;
// Thread -> Mutex locks for thread end.
typedef std::multimap<SceUID, SceUID> MutexMap;
static MutexMap mutexHeldLocks;

// Callback suspend/resume hooks, registered per wait type in __KernelMutexInit().
void __KernelMutexBeginCallback(SceUID threadID, SceUID prevCallbackId);
void __KernelMutexEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue);
void __KernelLwMutexBeginCallback(SceUID threadID, SceUID prevCallbackId);
void __KernelLwMutexEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue);
|
2013-03-30 20:48:29 +00:00
|
|
|
|
2012-11-19 15:31:36 +00:00
|
|
|
// Module init: registers timeout events, the thread-end listener (so a dying
// thread's held locks get released), and the callback begin/end handlers.
// Note: RegisterEvent order matters — it determines the event IDs saved in states.
void __KernelMutexInit()
{
	mutexWaitTimer = CoreTiming::RegisterEvent("MutexTimeout", __KernelMutexTimeout);
	lwMutexWaitTimer = CoreTiming::RegisterEvent("LwMutexTimeout", __KernelLwMutexTimeout);

	__KernelListenThreadEnd(&__KernelMutexThreadEnd);
	__KernelRegisterWaitTypeFuncs(WAITTYPE_MUTEX, __KernelMutexBeginCallback, __KernelMutexEndCallback);
	__KernelRegisterWaitTypeFuncs(WAITTYPE_LWMUTEX, __KernelLwMutexBeginCallback, __KernelLwMutexEndCallback);
}
|
|
|
|
|
2012-12-27 22:21:39 +00:00
|
|
|
// Savestate for the module-level state.  The timer IDs are re-bound to their
// handlers after load; serialization order must stay stable.
void __KernelMutexDoState(PointerWrap &p)
{
	p.Do(mutexWaitTimer);
	CoreTiming::RestoreRegisterEvent(mutexWaitTimer, "MutexTimeout", __KernelMutexTimeout);
	p.Do(lwMutexWaitTimer);
	CoreTiming::RestoreRegisterEvent(lwMutexWaitTimer, "LwMutexTimeout", __KernelLwMutexTimeout);
	p.Do(mutexHeldLocks);
	p.DoMarker("sceKernelMutex");
}
|
|
|
|
|
2012-12-27 06:45:19 +00:00
|
|
|
// Factory for the kernel object registry: produces an empty Mutex.
KernelObject *__KernelMutexObject()
{
	Mutex *obj = new Mutex();
	return obj;
}
|
|
|
|
|
|
|
|
// Factory for the kernel object registry: produces an empty LwMutex.
KernelObject *__KernelLwMutexObject()
{
	LwMutex *obj = new LwMutex();
	return obj;
}
|
|
|
|
|
2012-12-23 10:19:55 +00:00
|
|
|
// Module shutdown: drops the thread -> held-mutex bookkeeping.  The kernel
// objects themselves are torn down elsewhere.
void __KernelMutexShutdown()
{
	mutexHeldLocks.clear();
}
|
|
|
|
|
2012-11-20 08:18:11 +00:00
|
|
|
// Records that 'thread' now owns 'mutex' with the given lock count, and adds
// the pair to the held-locks map used at thread end.
void __KernelMutexAcquireLock(Mutex *mutex, int count, SceUID thread)
{
	const SceUID mutexID = mutex->GetUID();

#if defined(_DEBUG)
	// Sanity check: the mutex must not still be registered against this thread.
	std::pair<MutexMap::iterator, MutexMap::iterator> range = mutexHeldLocks.equal_range(thread);
	for (MutexMap::iterator it = range.first; it != range.second; ++it)
		_dbg_assert_msg_(HLE, (*it).second != mutexID, "Thread %d / mutex %d wasn't removed from mutexHeldLocks properly.", thread, mutexID);
#endif

	mutexHeldLocks.insert(std::make_pair(thread, mutexID));

	mutex->nm.lockThread = thread;
	mutex->nm.lockLevel = count;
}
|
|
|
|
|
|
|
|
// Convenience overload: the locking thread is the one currently running.
void __KernelMutexAcquireLock(Mutex *mutex, int count)
{
	const SceUID currentThread = __KernelGetCurThread();
	__KernelMutexAcquireLock(mutex, count, currentThread);
}
|
|
|
|
|
|
|
|
// Removes 'mutex' from its owner's held-locks bookkeeping (if it has an
// owner) and marks the mutex as no longer owned.
void __KernelMutexEraseLock(Mutex *mutex)
{
	const SceUID owner = mutex->nm.lockThread;
	if (owner != -1)
	{
		const SceUID mutexID = mutex->GetUID();
		std::pair<MutexMap::iterator, MutexMap::iterator> range = mutexHeldLocks.equal_range(owner);

		// Only the first matching entry is removed, same as before.
		MutexMap::iterator it = range.first;
		while (it != range.second && (*it).second != mutexID)
			++it;
		if (it != range.second)
			mutexHeldLocks.erase(it);
	}
	mutex->nm.lockThread = -1;
}
|
|
|
|
|
2012-12-08 03:01:31 +00:00
|
|
|
// Picks the waiter with the best (numerically lowest) thread priority.
// Ties go to whichever thread queued first, preserving FIFO among equals.
std::vector<SceUID>::iterator __KernelMutexFindPriority(std::vector<SceUID> &waiting)
{
	_dbg_assert_msg_(HLE, !waiting.empty(), "__KernelMutexFindPriority: Trying to find best of no threads.");

	std::vector<SceUID>::iterator best = waiting.end();
	u32 bestPrio = 0xFFFFFFFF;
	for (std::vector<SceUID>::iterator it = waiting.begin(); it != waiting.end(); ++it)
	{
		const u32 prio = __KernelGetThreadPrio(*it);
		if (prio < bestPrio)
		{
			bestPrio = prio;
			best = it;
		}
	}

	_dbg_assert_msg_(HLE, best != waiting.end(), "__KernelMutexFindPriority: Returning invalid best thread.");
	return best;
}
|
|
|
|
|
2013-03-30 20:48:29 +00:00
|
|
|
// Resumes one thread waiting on 'mutex'.  When result == 0 the thread wins
// the lock (with the count it originally asked for); a non-zero result aborts
// the wait with that error instead.  Returns true if the thread was resumed.
bool __KernelUnlockMutexForThread(Mutex *mutex, SceUID threadID, u32 &error, int result)
{
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

	// The waitID may be different after a timeout.
	if (waitID != mutex->GetUID())
		return false;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		// The wait value holds the count passed to the original lock call.
		int wVal = (int)__KernelGetWaitValue(threadID, error);
		__KernelMutexAcquireLock(mutex, wVal, threadID);
	}

	if (timeoutPtr != 0 && mutexWaitTimer != -1)
	{
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
		// Report the unused remainder of the timeout back to the guest.
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	return true;
}
|
|
|
|
|
|
|
|
// Invoked when a callback is about to run on a thread waiting on a mutex.
// Suspends the wait: stashes the absolute timeout deadline (keyed by the
// callback id, or the thread id when there is none) and removes the thread
// from the wait queue until __KernelMutexEndCallback restores it.
void __KernelMutexBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	Mutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<Mutex>(mutexID, error);
	if (mutex)
	{
		// This means two callbacks in a row. PSP crashes if the same callback runs inside itself.
		// TODO: Handle this better?
		if (mutex->pausedWaitTimeouts.find(pauseKey) != mutex->pausedWaitTimeouts.end())
			return;

		if (timeoutPtr != 0 && mutexWaitTimer != -1)
		{
			s64 cyclesLeft = CoreTiming::UnscheduleEvent(mutexWaitTimer, threadID);
			// Store the absolute deadline so the remaining time can be rescheduled.
			mutex->pausedWaitTimeouts[pauseKey] = CoreTiming::GetTicks() + cyclesLeft;
		}
		else
			// 0 means "no timeout" when the wait is resumed.
			mutex->pausedWaitTimeouts[pauseKey] = 0;

		// TODO: Hmm, what about priority/fifo order? Does it lose its place in line?
		mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());

		DEBUG_LOG(HLE, "sceKernelLockMutexCB: Suspending lock wait for callback");
	}
	else
		WARN_LOG_REPORT(HLE, "sceKernelLockMutexCB: beginning callback with bad wait id?");
}
|
|
|
|
|
|
|
|
// Invoked when a callback finishes on a thread whose mutex wait was paused by
// __KernelMutexBeginCallback.  Either hands the thread the lock, times the
// wait out, or re-queues the thread with whatever timeout remained.
void __KernelMutexEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue)
{
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	Mutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<Mutex>(mutexID, error);
	if (!mutex || mutex->pausedWaitTimeouts.find(pauseKey) == mutex->pausedWaitTimeouts.end())
	{
		// The mutex was deleted while the callback ran.
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && mutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	u64 waitDeadline = mutex->pausedWaitTimeouts[pauseKey];
	mutex->pausedWaitTimeouts.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	// Attempt to unlock.
	if (mutex->nm.lockThread == -1 && __KernelUnlockMutexForThread(mutex, threadID, error, 0))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	if (cyclesLeft < 0 && waitDeadline != 0)
	{
		if (timeoutPtr != 0 && mutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
	else
	{
		// Re-arm the timeout for the thread whose wait we're restoring.
		// Fix: previously passed __KernelGetCurThread() as the event userdata,
		// which would time out the wrong thread if it differs from threadID —
		// everything else here (resume, re-queue) consistently uses threadID.
		if (timeoutPtr != 0 && mutexWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, mutexWaitTimer, threadID);

		// TODO: Should this not go at the end?
		mutex->waitingThreads.push_back(threadID);

		DEBUG_LOG(HLE, "sceKernelLockMutexCB: Resuming lock wait for callback");
	}
}
|
|
|
|
|
2012-12-09 08:09:25 +00:00
|
|
|
// HLE: creates a mutex.  Returns the new UID on success or a kernel error.
// An initialCount > 0 means the creating thread starts out holding the lock.
int sceKernelCreateMutex(const char *name, u32 attr, int initialCount, u32 optionsPtr)
{
	if (!name)
	{
		WARN_LOG_REPORT(HLE, "%08x=sceKernelCreateMutex(): invalid name", SCE_KERNEL_ERROR_ERROR);
		return SCE_KERNEL_ERROR_ERROR;
	}
	// Bits outside 0xBFF are rejected outright; unknown-but-allowed bits only warn below.
	if (attr & ~0xBFF)
	{
		WARN_LOG_REPORT(HLE, "%08x=sceKernelCreateMutex(): invalid attr parameter: %08x", SCE_KERNEL_ERROR_ILLEGAL_ATTR, attr);
		return SCE_KERNEL_ERROR_ILLEGAL_ATTR;
	}

	if (initialCount < 0)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	// A count above 1 only makes sense for a recursive mutex.
	if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;

	Mutex *mutex = new Mutex();
	SceUID id = kernelObjects.Create(mutex);

	mutex->nm.size = sizeof(mutex->nm);
	strncpy(mutex->nm.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	mutex->nm.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
	mutex->nm.attr = attr;
	mutex->nm.initialCount = initialCount;
	if (initialCount == 0)
	{
		mutex->nm.lockLevel = 0;
		mutex->nm.lockThread = -1;
	}
	else
		// Creator starts out owning the lock.
		__KernelMutexAcquireLock(mutex, initialCount);

	DEBUG_LOG(HLE, "%i=sceKernelCreateMutex(%s, %08x, %d, %08x)", id, name, attr, initialCount, optionsPtr);

	if (optionsPtr != 0)
	{
		u32 size = Memory::Read_U32(optionsPtr);
		if (size != 0)
			WARN_LOG_REPORT(HLE, "sceKernelCreateMutex(%s) unsupported options parameter, size = %d", name, size);
	}
	if ((attr & ~PSP_MUTEX_ATTR_KNOWN) != 0)
		WARN_LOG_REPORT(HLE, "sceKernelCreateMutex(%s) unsupported attr parameter: %08x", name, attr);

	return id;
}
|
|
|
|
|
2012-12-09 08:09:25 +00:00
|
|
|
// HLE: deletes a mutex.  All waiters are woken with WAIT_DELETE, and any held
// lock is dropped from the bookkeeping before the object is destroyed.
int sceKernelDeleteMutex(SceUID id)
{
	DEBUG_LOG(HLE,"sceKernelDeleteMutex(%i)", id);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);
	if (mutex)
	{
		bool wokeThreads = false;
		std::vector<SceUID>::iterator iter, end;
		for (iter = mutex->waitingThreads.begin(), end = mutex->waitingThreads.end(); iter != end; ++iter)
			wokeThreads |= __KernelUnlockMutexForThread(mutex, *iter, error, SCE_KERNEL_ERROR_WAIT_DELETE);

		if (mutex->nm.lockThread != -1)
			__KernelMutexEraseLock(mutex);
		mutex->waitingThreads.clear();

		if (wokeThreads)
			hleReSchedule("mutex deleted");

		return kernelObjects.Destroy<Mutex>(id);
	}
	else
		return error;
}
|
|
|
|
|
2013-03-30 20:48:29 +00:00
|
|
|
// Validates a lock attempt.  Returns true when the lock can be taken (or
// re-taken recursively) right now.  Returns false with 'error' set for an
// invalid request, or false with error == 0 when the caller must wait.
// The else-if order below defines which error wins; do not reorder.
bool __KernelLockMutexCheck(Mutex *mutex, int count, u32 &error)
{
	// 'error' already set means the mutex lookup itself failed.
	if (error)
		return false;

	const bool mutexIsRecursive = (mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) != 0;

	if (count <= 0)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	else if (count > 1 && !mutexIsRecursive)
		error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	// Two positive ints will always overflow to negative.
	else if (count + mutex->nm.lockLevel < 0)
		error = PSP_MUTEX_ERROR_LOCK_OVERFLOW;
	// Only a recursive mutex can re-lock.
	else if (mutex->nm.lockThread == __KernelGetCurThread())
	{
		if (mutexIsRecursive)
			return true;

		error = PSP_MUTEX_ERROR_ALREADY_LOCKED;
	}
	// Otherwise it would lock or wait.
	else if (mutex->nm.lockLevel == 0)
		return true;

	return false;
}
|
|
|
|
|
|
|
|
// Attempts to take the lock immediately.  Returns true on success; false
// with error set for invalid requests, or false with error == 0 when the
// caller must block (sceKernelLockMutex handles the wait).
bool __KernelLockMutex(Mutex *mutex, int count, u32 &error)
{
	if (!__KernelLockMutexCheck(mutex, count, error))
		return false;

	if (mutex->nm.lockLevel == 0)
	{
		__KernelMutexAcquireLock(mutex, count);
		// Nobody had it locked - no need to block
		return true;
	}

	if (mutex->nm.lockThread == __KernelGetCurThread())
	{
		// __KernelLockMutexCheck() would've returned an error, so this must be recursive.
		mutex->nm.lockLevel += count;
		return true;
	}

	return false;
}
|
|
|
|
|
2012-11-20 08:18:11 +00:00
|
|
|
// Releases the mutex and hands it to the next eligible waiter (priority order
// when PSP_MUTEX_ATTR_PRIORITY is set, otherwise FIFO).  Keeps trying until a
// waiter actually accepts it (stale waits are skipped) or the queue empties.
// Returns true if a thread was woken.
bool __KernelUnlockMutex(Mutex *mutex, u32 &error)
{
	__KernelMutexEraseLock(mutex);

	bool wokeThreads = false;
	std::vector<SceUID>::iterator iter;
	while (!wokeThreads && !mutex->waitingThreads.empty())
	{
		if ((mutex->nm.attr & PSP_MUTEX_ATTR_PRIORITY) != 0)
			iter = __KernelMutexFindPriority(mutex->waitingThreads);
		else
			iter = mutex->waitingThreads.begin();

		wokeThreads |= __KernelUnlockMutexForThread(mutex, *iter, error, 0);
		mutex->waitingThreads.erase(iter);
	}

	// Nobody took it; leave the mutex unowned.
	if (!wokeThreads)
		mutex->nm.lockThread = -1;

	return wokeThreads;
}
|
|
|
|
|
2012-11-19 01:54:55 +00:00
|
|
|
// CoreTiming handler fired when a lock wait's timeout expires.  The event's
// userdata is the waiting thread's UID.
void __KernelMutexTimeout(u64 userdata, int cyclesLate)
{
	SceUID threadID = (SceUID)userdata;

	u32 error;
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		// No time left to report back.
		Memory::Write_U32(0, timeoutPtr);

	// Only resume if the thread is still actually in a mutex wait.
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	if (mutexID != 0)
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);

	// We intentionally don't remove from waitingThreads here yet.
	// The reason is, if it times out, but what it was waiting on is DELETED prior to it
	// actually running, it will get a DELETE result instead of a TIMEOUT.
	// So, we need to remember it or we won't be able to mark it DELETE instead later.
}
|
|
|
|
|
2012-11-20 08:18:11 +00:00
|
|
|
// Thread-end listener: pulls the dying thread out of any mutex wait queue and
// force-releases every mutex it still held so waiters are not stranded.
void __KernelMutexThreadEnd(SceUID threadID)
{
	u32 error;

	// If it was waiting on the mutex, it should finish now.
	SceUID waitingMutexID = __KernelGetWaitID(threadID, WAITTYPE_MUTEX, error);
	if (waitingMutexID)
	{
		Mutex *mutex = kernelObjects.Get<Mutex>(waitingMutexID, error);
		if (mutex)
			mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());
	}

	// Unlock all mutexes the thread had locked.
	std::pair<MutexMap::iterator, MutexMap::iterator> locked = mutexHeldLocks.equal_range(threadID);
	for (MutexMap::iterator iter = locked.first; iter != locked.second; )
	{
		// Need to increment early so erase() doesn't invalidate.
		SceUID mutexID = (*iter++).second;
		Mutex *mutex = kernelObjects.Get<Mutex>(mutexID, error);

		if (mutex)
		{
			// Drop any recursion depth; __KernelUnlockMutex erases the map entry.
			mutex->nm.lockLevel = 0;
			__KernelUnlockMutex(mutex, error);
		}
	}
}
|
|
|
|
|
2012-11-19 03:40:19 +00:00
|
|
|
// Schedules the timeout event for a lock wait that's about to begin.
// No-op when the guest passed no timeout or the timer isn't registered.
void __KernelWaitMutex(Mutex *mutex, u32 timeoutPtr)
{
	if (timeoutPtr == 0 || mutexWaitTimer == -1)
		return;

	int micro = (int) Memory::Read_U32(timeoutPtr);

	// This happens to be how the hardware seems to time things.
	if (micro <= 3)
		micro = 15;
	else if (micro <= 249)
		micro = 250;

	// This should call __KernelMutexTimeout() later, unless we cancel it.
	CoreTiming::ScheduleEvent(usToCycles(micro), mutexWaitTimer, __KernelGetCurThread());
}
|
|
|
|
|
2012-11-18 07:08:19 +00:00
|
|
|
// int sceKernelLockMutex(SceUID id, int count, int *timeout)
|
2012-12-09 08:09:25 +00:00
|
|
|
// HLE: locks a mutex, blocking (without callbacks) until it's available or
// the optional timeout at timeoutPtr expires.
// int sceKernelLockMutex(SceUID id, int count, int *timeout)
int sceKernelLockMutex(SceUID id, int count, u32 timeoutPtr)
{
	DEBUG_LOG(HLE, "sceKernelLockMutex(%i, %i, %08x)", id, count, timeoutPtr);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	if (__KernelLockMutex(mutex, count, error))
		return 0;
	else if (error)
		return error;
	else
	{
		SceUID threadID = __KernelGetCurThread();
		// May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates.
		if (std::find(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID) == mutex->waitingThreads.end())
			mutex->waitingThreads.push_back(threadID);
		__KernelWaitMutex(mutex, timeoutPtr);
		__KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, false, "mutex waited");

		// Return value will be overwritten by wait.
		return 0;
	}
}
|
|
|
|
|
2012-11-15 07:34:52 +00:00
|
|
|
// int sceKernelLockMutexCB(SceUID id, int count, int *timeout)
|
2012-12-09 08:09:25 +00:00
|
|
|
// HLE: like sceKernelLockMutex, but the wait is callback-enabled — pending
// callbacks may run while the thread waits (or even when the lock succeeds).
// int sceKernelLockMutexCB(SceUID id, int count, int *timeout)
int sceKernelLockMutexCB(SceUID id, int count, u32 timeoutPtr)
{
	DEBUG_LOG(HLE, "sceKernelLockMutexCB(%i, %i, %08x)", id, count, timeoutPtr);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	if (!__KernelLockMutexCheck(mutex, count, error))
	{
		if (error)
			return error;

		// Can't lock right now: queue up and wait (processing callbacks).
		SceUID threadID = __KernelGetCurThread();
		// May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates.
		if (std::find(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID) == mutex->waitingThreads.end())
			mutex->waitingThreads.push_back(threadID);
		__KernelWaitMutex(mutex, timeoutPtr);
		__KernelWaitCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr, true, "mutex waited");

		// Return value will be overwritten by wait.
		return 0;
	}
	else
	{
		// Lock is available, but callbacks still get a chance to run first.
		if (__KernelCurHasReadyCallbacks())
		{
			// Might actually end up having to wait, so set the timeout.
			__KernelWaitMutex(mutex, timeoutPtr);
			__KernelWaitCallbacksCurThread(WAITTYPE_MUTEX, id, count, timeoutPtr);

			// Return value will be written to callback's v0, but... that's probably fine?
		}
		else
			__KernelLockMutex(mutex, count, error);

		return 0;
	}
}
|
|
|
|
|
2012-11-18 04:21:05 +00:00
|
|
|
// int sceKernelTryLockMutex(SceUID id, int count)
|
2012-12-09 08:09:25 +00:00
|
|
|
// HLE: non-blocking lock attempt — never waits; fails immediately when the
// mutex is held by someone else.
int sceKernelTryLockMutex(SceUID id, int count)
{
	DEBUG_LOG(HLE, "sceKernelTryLockMutex(%i, %i)", id, count);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	const bool locked = __KernelLockMutex(mutex, count, error);
	if (locked)
		return 0;
	// No lock and no error code means it's simply contended.
	return error != 0 ? (int)error : (int)PSP_MUTEX_ERROR_TRYLOCK_FAILED;
}
|
|
|
|
|
|
|
|
// int sceKernelUnlockMutex(SceUID id, int count)
|
2012-12-09 08:09:25 +00:00
|
|
|
// HLE: releases 'count' levels of the lock.  Only the owning thread may
// unlock; dropping to level 0 wakes the next waiter and reschedules.
// int sceKernelUnlockMutex(SceUID id, int count)
int sceKernelUnlockMutex(SceUID id, int count)
{
	DEBUG_LOG(HLE, "sceKernelUnlockMutex(%i, %i)", id, count);
	u32 error;
	Mutex *mutex = kernelObjects.Get<Mutex>(id, error);

	if (error)
		return error;
	if (count <= 0)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	// Multi-level unlock only makes sense for a recursive mutex.
	if ((mutex->nm.attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && count > 1)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	if (mutex->nm.lockLevel == 0 || mutex->nm.lockThread != __KernelGetCurThread())
		return PSP_MUTEX_ERROR_NOT_LOCKED;
	if (mutex->nm.lockLevel < count)
		return PSP_MUTEX_ERROR_UNLOCK_UNDERFLOW;

	mutex->nm.lockLevel -= count;

	if (mutex->nm.lockLevel == 0)
	{
		// Fully released; pass it on and reschedule if someone woke.
		if (__KernelUnlockMutex(mutex, error))
			hleReSchedule("mutex unlocked");
	}

	return 0;
}
|
2012-11-06 19:56:19 +00:00
|
|
|
|
2013-02-25 06:50:48 +00:00
|
|
|
// HLE: copies the mutex's NativeMutex status block into guest memory at
// infoAddr.  numWaitThreads is refreshed from the wait queue just before the write.
int sceKernelReferMutexStatus(SceUID id, u32 infoAddr)
{
	u32 error;
	Mutex *m = kernelObjects.Get<Mutex>(id, error);
	if (!m)
	{
		ERROR_LOG(HLE, "sceKernelReferMutexStatus(%i, %08x): invalid mutex id", id, infoAddr);
		return error;
	}

	DEBUG_LOG(HLE, "sceKernelReferMutexStatus(%08x, %08x)", id, infoAddr);

	// Should we crash the thread somehow?
	if (!Memory::IsValidAddress(infoAddr))
		return -1;

	// Don't write if the size is 0. Anything else is A-OK, though, apparently.
	if (Memory::Read_U32(infoAddr) != 0)
	{
		// Refresh and write
		m->nm.numWaitThreads = (int) m->waitingThreads.size();
		Memory::WriteStruct(infoAddr, &m->nm);
	}
	return 0;
}
|
|
|
|
|
2012-12-09 08:09:25 +00:00
|
|
|
// Creates a lightweight mutex backed by a guest-memory workarea struct.
// Registers an LwMutex kernel object and initializes the NativeLwMutexWorkarea
// at workareaPtr; if initialCount > 0 the current thread becomes the holder.
// Returns 0 on success or an SCE_KERNEL_ERROR_* code on bad arguments.
int sceKernelCreateLwMutex(u32 workareaPtr, const char *name, u32 attr, int initialCount, u32 optionsPtr)
{
	if (!name)
	{
		WARN_LOG_REPORT(HLE, "%08x=sceKernelCreateLwMutex(): invalid name", SCE_KERNEL_ERROR_ERROR);
		return SCE_KERNEL_ERROR_ERROR;
	}
	// Attribute bits above 0x3FF are rejected outright.
	if (attr >= 0x400)
	{
		WARN_LOG_REPORT(HLE, "%08x=sceKernelCreateLwMutex(): invalid attr parameter: %08x", SCE_KERNEL_ERROR_ILLEGAL_ATTR, attr);
		return SCE_KERNEL_ERROR_ILLEGAL_ATTR;
	}

	if (initialCount < 0)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	// A non-recursive lwmutex can only start with a count of 0 or 1.
	if ((attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) == 0 && initialCount > 1)
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;

	LwMutex *mutex = new LwMutex();
	SceUID id = kernelObjects.Create(mutex);
	mutex->nm.size = sizeof(mutex->nm);
	strncpy(mutex->nm.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	mutex->nm.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
	mutex->nm.attr = attr;
	mutex->nm.uid = id;
	mutex->nm.workareaPtr = workareaPtr;
	mutex->nm.initialCount = initialCount;
	// NOTE(review): workareaPtr is dereferenced without an IsValidAddress()
	// check here (unlike sceKernelDeleteLwMutex) - confirm callers guarantee it.
	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);
	workarea->init();
	workarea->lockLevel = initialCount;
	// initialCount > 0 means the creating thread starts out holding the lock.
	if (initialCount == 0)
		workarea->lockThread = 0;
	else
		workarea->lockThread = __KernelGetCurThread();
	workarea->attr = attr;
	workarea->uid = id;

	DEBUG_LOG(HLE, "sceKernelCreateLwMutex(%08x, %s, %08x, %d, %08x)", workareaPtr, name, attr, initialCount, optionsPtr);

	// Options are accepted but not implemented; report non-empty ones.
	if (optionsPtr != 0)
	{
		u32 size = Memory::Read_U32(optionsPtr);
		if (size != 0)
			WARN_LOG_REPORT(HLE, "sceKernelCreateLwMutex(%s) unsupported options parameter, size = %d", name, size);
	}
	// Unknown attribute bits are allowed through, but reported for telemetry.
	if ((attr & ~PSP_MUTEX_ATTR_KNOWN) != 0)
		WARN_LOG_REPORT(HLE, "sceKernelCreateLwMutex(%s) unsupported attr parameter: %08x", name, attr);

	return 0;
}
|
|
|
|
|
2013-03-31 03:12:22 +00:00
|
|
|
// Tries to transfer the lwmutex to one specific waiting thread.
// On success (result == 0) the workarea is updated to show threadID as the
// holder at its requested lock count; for error results (e.g. WAIT_DELETE)
// the thread is woken with that code without taking the lock.
// Returns true if the thread was resumed, false if it was no longer waiting
// on this mutex (e.g. its wait already timed out).
template <typename T>
bool __KernelUnlockLwMutexForThread(LwMutex *mutex, T workarea, SceUID threadID, u32 &error, int result)
{
	SceUID waitID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);

	// The waitID may be different after a timeout.
	if (waitID != mutex->GetUID())
		return false;

	// If result is an error code, we're just letting it go.
	if (result == 0)
	{
		// The wait value is the count the thread originally asked to lock with.
		workarea->lockLevel = (int) __KernelGetWaitValue(threadID, error);
		workarea->lockThread = threadID;
	}

	if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
	{
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
		// Report the remaining wait time back to the guest in microseconds.
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(threadID, result);
	return true;
}
|
|
|
|
|
2012-12-09 08:09:25 +00:00
|
|
|
int sceKernelDeleteLwMutex(u32 workareaPtr)
{
	DEBUG_LOG(HLE, "sceKernelDeleteLwMutex(%08x)", workareaPtr);

	if (!workareaPtr || !Memory::IsValidAddress(workareaPtr))
		return SCE_KERNEL_ERROR_ILLEGAL_ADDR;

	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);

	u32 error;
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error);
	if (!mutex)
		return error;

	// Kick every waiter out with a WAIT_DELETE result.
	bool wokeThreads = false;
	for (SceUID waitingThread : mutex->waitingThreads)
		wokeThreads |= __KernelUnlockLwMutexForThread(mutex, workarea, waitingThread, error, SCE_KERNEL_ERROR_WAIT_DELETE);
	mutex->waitingThreads.clear();

	// Wipe the guest-side workarea so further use fails cleanly.
	workarea->clear();

	if (wokeThreads)
		hleReSchedule("lwmutex deleted");

	return kernelObjects.Destroy<LwMutex>(mutex->GetUID());
}
|
|
|
|
|
2013-03-31 03:12:22 +00:00
|
|
|
template <typename T>
|
|
|
|
bool __KernelLockLwMutex(T workarea, int count, u32 &error)
|
2012-11-06 19:56:19 +00:00
|
|
|
{
|
2012-11-21 08:21:10 +00:00
|
|
|
if (!error)
|
|
|
|
{
|
|
|
|
if (count <= 0)
|
|
|
|
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
|
2013-03-31 03:12:22 +00:00
|
|
|
else if (count > 1 && !(workarea->attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE))
|
2012-11-21 08:21:10 +00:00
|
|
|
error = SCE_KERNEL_ERROR_ILLEGAL_COUNT;
|
|
|
|
// Two positive ints will always overflow to negative.
|
2013-03-31 03:12:22 +00:00
|
|
|
else if (count + workarea->lockLevel < 0)
|
2012-11-21 08:21:10 +00:00
|
|
|
error = PSP_LWMUTEX_ERROR_LOCK_OVERFLOW;
|
2013-03-31 03:12:22 +00:00
|
|
|
else if (workarea->uid == -1)
|
2012-11-21 08:21:10 +00:00
|
|
|
error = PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (error)
|
|
|
|
return false;
|
|
|
|
|
2013-03-31 03:12:22 +00:00
|
|
|
if (workarea->lockLevel == 0)
|
2012-11-21 08:21:10 +00:00
|
|
|
{
|
2013-03-31 03:12:22 +00:00
|
|
|
if (workarea->lockThread != 0)
|
2012-11-21 08:21:10 +00:00
|
|
|
{
|
|
|
|
// Validate that it actually exists so we can return an error if not.
|
2013-03-31 03:12:22 +00:00
|
|
|
kernelObjects.Get<LwMutex>(workarea->uid, error);
|
2012-11-21 08:21:10 +00:00
|
|
|
if (error)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-03-31 03:12:22 +00:00
|
|
|
workarea->lockLevel = count;
|
|
|
|
workarea->lockThread = __KernelGetCurThread();
|
2012-11-21 08:21:10 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-03-31 03:12:22 +00:00
|
|
|
if (workarea->lockThread == __KernelGetCurThread())
|
2012-11-21 08:21:10 +00:00
|
|
|
{
|
|
|
|
// Recursive mutex, let's just increase the lock count and keep going
|
2013-03-31 03:12:22 +00:00
|
|
|
if (workarea->attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE)
|
2012-11-21 08:21:10 +00:00
|
|
|
{
|
2013-03-31 03:12:22 +00:00
|
|
|
workarea->lockLevel += count;
|
2012-11-21 08:21:10 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
error = PSP_LWMUTEX_ERROR_ALREADY_LOCKED;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
2012-11-06 19:56:19 +00:00
|
|
|
}
|
|
|
|
|
2013-03-31 03:12:22 +00:00
|
|
|
// Hands an unlocked lwmutex to the next eligible waiter, if any.
// Repeatedly picks a waiter (highest priority first when ATTR_PRIORITY is
// set, otherwise FIFO) until one actually wakes; threads whose waits already
// timed out are skipped and removed. Returns true if a thread was woken.
template <typename T>
bool __KernelUnlockLwMutex(T workarea, u32 &error)
{
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error);
	if (error)
	{
		// Can't find the kernel object - just mark the workarea unheld.
		workarea->lockThread = 0;
		return false;
	}

	bool wokeThreads = false;
	std::vector<SceUID>::iterator iter;
	// Loop because some queued waiters may no longer be valid (e.g. timed out);
	// stop as soon as one thread successfully takes the lock.
	while (!wokeThreads && !mutex->waitingThreads.empty())
	{
		if ((mutex->nm.attr & PSP_MUTEX_ATTR_PRIORITY) != 0)
			iter = __KernelMutexFindPriority(mutex->waitingThreads);
		else
			iter = mutex->waitingThreads.begin();

		wokeThreads |= __KernelUnlockLwMutexForThread(mutex, workarea, *iter, error, 0);
		mutex->waitingThreads.erase(iter);
	}

	// Nobody took it: the lwmutex is now simply unheld.
	if (!wokeThreads)
		workarea->lockThread = 0;

	return wokeThreads;
}
|
|
|
|
|
2012-11-21 08:21:10 +00:00
|
|
|
// CoreTiming callback fired when an lwmutex wait's timeout expires.
void __KernelLwMutexTimeout(u64 userdata, int cyclesLate)
{
	SceUID threadID = (SceUID)userdata;
	u32 error;

	// Report zero time remaining back to the waiter, if it asked for it.
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	if (timeoutPtr != 0)
		Memory::Write_U32(0, timeoutPtr);

	// Only wake the thread if it is still waiting on an lwmutex.
	if (__KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error) != 0)
		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);

	// We intentionally don't remove from waitingThreads here yet.
	// The reason is, if it times out, but what it was waiting on is DELETED prior to it
	// actually running, it will get a DELETE result instead of a TIMEOUT.
	// So, we need to remember it or we won't be able to mark it DELETE instead later.
}
|
|
|
|
|
|
|
|
// Schedules the timeout event for the current thread's lwmutex wait.
void __KernelWaitLwMutex(LwMutex *mutex, u32 timeoutPtr)
{
	// Nothing to schedule without a timeout request (or without a timer).
	if (timeoutPtr == 0 || lwMutexWaitTimer == -1)
		return;

	int waitUs = (int) Memory::Read_U32(timeoutPtr);

	// This happens to be how the hardware seems to time things.
	if (waitUs <= 3)
		waitUs = 15;
	else if (waitUs <= 249)
		waitUs = 250;

	// This should call __KernelLwMutexTimeout() later, unless we cancel it.
	CoreTiming::ScheduleEvent(usToCycles(waitUs), lwMutexWaitTimer, __KernelGetCurThread());
}
|
|
|
|
|
2013-03-30 22:23:52 +00:00
|
|
|
// Suspends an lwmutex wait so the thread can run a callback.
// Saves the remaining timeout (as an absolute deadline tick) keyed by the
// callback/thread so __KernelLwMutexEndCallback can restore it afterwards,
// and removes the thread from the mutex's waiting list in the meantime.
void __KernelLwMutexBeginCallback(SceUID threadID, SceUID prevCallbackId)
{
	// Nested callbacks are keyed by the previous callback's id.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	LwMutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<LwMutex>(mutexID, error);
	if (mutex)
	{
		// This means two callbacks in a row. PSP crashes if the same callback runs inside itself.
		// TODO: Handle this better?
		if (mutex->pausedWaitTimeouts.find(pauseKey) != mutex->pausedWaitTimeouts.end())
			return;

		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
		{
			// Remove any event for this thread.
			s64 cyclesLeft = CoreTiming::UnscheduleEvent(lwMutexWaitTimer, threadID);
			// Store as an absolute deadline so the remaining time survives the callback.
			mutex->pausedWaitTimeouts[pauseKey] = CoreTiming::GetTicks() + cyclesLeft;
		}
		else
			// 0 marks "no timeout" for the end-callback path.
			mutex->pausedWaitTimeouts[pauseKey] = 0;

		// TODO: Hmm, what about priority/fifo order? Does it lose its place in line?
		mutex->waitingThreads.erase(std::remove(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID), mutex->waitingThreads.end());

		DEBUG_LOG(HLE, "sceKernelLockLwMutexCB: Suspending lock wait for callback");
	}
	else
		WARN_LOG_REPORT(HLE, "sceKernelLockLwMutexCB: beginning callback with bad wait id?");
}
|
|
|
|
|
|
|
|
// Resumes an lwmutex wait after a callback has run.
// If the mutex vanished while paused, the thread is woken with WAIT_DELETE.
// Otherwise it first tries to grab the now-possibly-free lock; failing that,
// it either times the wait out (deadline passed) or re-queues the thread and
// re-arms the timeout with the saved remaining time.
void __KernelLwMutexEndCallback(SceUID threadID, SceUID prevCallbackId, u32 &returnValue)
{
	// Must match the key used by __KernelLwMutexBeginCallback.
	SceUID pauseKey = prevCallbackId == 0 ? threadID : prevCallbackId;

	u32 error;
	SceUID mutexID = __KernelGetWaitID(threadID, WAITTYPE_LWMUTEX, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	LwMutex *mutex = mutexID == 0 ? NULL : kernelObjects.Get<LwMutex>(mutexID, error);
	if (!mutex || mutex->pausedWaitTimeouts.find(pauseKey) == mutex->pausedWaitTimeouts.end())
	{
		// TODO: Since it was deleted, we don't know how long was actually left.
		// For now, we just say the full time was taken.
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_DELETE);
		return;
	}

	u64 waitDeadline = mutex->pausedWaitTimeouts[pauseKey];
	mutex->pausedWaitTimeouts.erase(pauseKey);

	// TODO: Don't wake up if __KernelCurHasReadyCallbacks()?

	// Attempt to unlock.
	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(mutex->nm.workareaPtr);
	if (mutex->nm.lockThread == -1 && __KernelUnlockLwMutexForThread(mutex, workarea, threadID, error, 0))
		return;

	// We only check if it timed out if it couldn't unlock.
	s64 cyclesLeft = waitDeadline - CoreTiming::GetTicks();
	// waitDeadline == 0 means there was no timeout on this wait.
	if (cyclesLeft < 0 && waitDeadline != 0)
	{
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			Memory::Write_U32(0, timeoutPtr);

		__KernelResumeThreadFromWait(threadID, SCE_KERNEL_ERROR_WAIT_TIMEOUT);
	}
	else
	{
		// Re-arm the timeout with whatever time remained at pause.
		if (timeoutPtr != 0 && lwMutexWaitTimer != -1)
			CoreTiming::ScheduleEvent(cyclesLeft, lwMutexWaitTimer, __KernelGetCurThread());

		// TODO: Should this not go at the end?
		mutex->waitingThreads.push_back(threadID);

		DEBUG_LOG(HLE, "sceKernelLockLwMutexCB: Resuming lock wait for callback");
	}
}
|
|
|
|
|
2012-12-09 08:09:25 +00:00
|
|
|
// Non-blocking lwmutex lock, pre-6.00 flavor.
// Returns 0 on success. On any failure it returns
// PSP_MUTEX_ERROR_TRYLOCK_FAILED regardless of the underlying cause.
// Fix: the original had `else if (error) return X; else return X;` - two
// branches returning the identical value; the dead branch is collapsed.
int sceKernelTryLockLwMutex(u32 workareaPtr, int count)
{
	DEBUG_LOG(HLE, "sceKernelTryLockLwMutex(%08x, %i)", workareaPtr, count);

	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);

	u32 error = 0;
	if (__KernelLockLwMutex(workarea, count, error))
		return 0;

	// Unlike sceKernelTryLockLwMutex_600, this always returns the same error.
	return PSP_MUTEX_ERROR_TRYLOCK_FAILED;
}
|
|
|
|
|
2012-12-09 08:09:25 +00:00
|
|
|
// Non-blocking lwmutex lock, 6.00+ flavor: distinguishes validation errors
// from plain contention.
int sceKernelTryLockLwMutex_600(u32 workareaPtr, int count)
{
	DEBUG_LOG(HLE, "sceKernelTryLockLwMutex_600(%08x, %i)", workareaPtr, count);

	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);

	u32 error = 0;
	if (__KernelLockLwMutex(workarea, count, error))
		return 0;
	// Pass through a specific validation error when there was one.
	if (error)
		return error;
	// Otherwise the mutex was simply held by someone else.
	return PSP_LWMUTEX_ERROR_TRYLOCK_FAILED;
}
|
|
|
|
|
2012-12-09 08:09:25 +00:00
|
|
|
// Blocking lwmutex lock (no callbacks).
int sceKernelLockLwMutex(u32 workareaPtr, int count, u32 timeoutPtr)
{
	VERBOSE_LOG(HLE, "sceKernelLockLwMutex(%08x, %i, %08x)", workareaPtr, count, timeoutPtr);

	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);

	u32 error = 0;
	if (__KernelLockLwMutex(workarea, count, error))
		return 0;
	if (error)
		return error;

	// Contended: put the current thread to sleep until it's unlocked or times out.
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error);
	if (!mutex)
		return error;

	SceUID threadID = __KernelGetCurThread();
	// May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates.
	bool alreadyQueued = std::find(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID) != mutex->waitingThreads.end();
	if (!alreadyQueued)
		mutex->waitingThreads.push_back(threadID);
	__KernelWaitLwMutex(mutex, timeoutPtr);
	__KernelWaitCurThread(WAITTYPE_LWMUTEX, workarea->uid, count, timeoutPtr, false, "lwmutex waited");

	// Return value will be overwritten by wait.
	return 0;
}
|
|
|
|
|
2012-12-09 08:09:25 +00:00
|
|
|
// Blocking lwmutex lock, callback-enabled variant.
int sceKernelLockLwMutexCB(u32 workareaPtr, int count, u32 timeoutPtr)
{
	VERBOSE_LOG(HLE, "sceKernelLockLwMutexCB(%08x, %i, %08x)", workareaPtr, count, timeoutPtr);

	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);

	u32 error = 0;
	if (__KernelLockLwMutex(workarea, count, error))
		return 0;
	if (error)
		return error;

	// Contended: sleep (with callbacks allowed) until unlocked or timed out.
	LwMutex *mutex = kernelObjects.Get<LwMutex>(workarea->uid, error);
	if (!mutex)
		return error;

	SceUID threadID = __KernelGetCurThread();
	// May be in a tight loop timing out (where we don't remove from waitingThreads yet), don't want to add duplicates.
	bool alreadyQueued = std::find(mutex->waitingThreads.begin(), mutex->waitingThreads.end(), threadID) != mutex->waitingThreads.end();
	if (!alreadyQueued)
		mutex->waitingThreads.push_back(threadID);
	__KernelWaitLwMutex(mutex, timeoutPtr);
	__KernelWaitCurThread(WAITTYPE_LWMUTEX, workarea->uid, count, timeoutPtr, true, "lwmutex cb waited");

	// Return value will be overwritten by wait.
	return 0;
}
|
|
|
|
|
2012-12-09 08:09:25 +00:00
|
|
|
int sceKernelUnlockLwMutex(u32 workareaPtr, int count)
{
	VERBOSE_LOG(HLE, "sceKernelUnlockLwMutex(%08x, %i)", workareaPtr, count);

	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);

	// Validate before touching the lock state.
	if (workarea->uid == -1)
		return PSP_LWMUTEX_ERROR_NO_SUCH_LWMUTEX;
	bool recursive = (workarea->attr & PSP_MUTEX_ATTR_ALLOW_RECURSIVE) != 0;
	if (count <= 0 || (!recursive && count > 1))
		return SCE_KERNEL_ERROR_ILLEGAL_COUNT;
	if (workarea->lockLevel == 0 || workarea->lockThread != __KernelGetCurThread())
		return PSP_LWMUTEX_ERROR_NOT_LOCKED;
	if (workarea->lockLevel < count)
		return PSP_LWMUTEX_ERROR_UNLOCK_UNDERFLOW;

	workarea->lockLevel -= count;

	// Fully released - wake a waiter if there is one.
	if (workarea->lockLevel == 0)
	{
		u32 error;
		if (__KernelUnlockLwMutex(workarea, error))
			hleReSchedule("lwmutex unlocked");
	}

	return 0;
}
|
2013-02-25 06:50:48 +00:00
|
|
|
|
|
|
|
// Shared implementation for the two refer-status entry points: refreshes the
// native struct from the guest workarea and copies it to infoPtr.
int __KernelReferLwMutexStatus(SceUID uid, u32 infoPtr)
{
	u32 error;
	LwMutex *mutex = kernelObjects.Get<LwMutex>(uid, error);
	if (mutex == NULL)
		return error;

	// Should we crash the thread somehow?
	if (!Memory::IsValidAddress(infoPtr))
		return -1;

	// A zero size field means the caller doesn't want anything written.
	if (Memory::Read_U32(infoPtr) != 0)
	{
		auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(mutex->nm.workareaPtr);

		// Refresh from the live workarea, then write the struct out.
		mutex->nm.currentCount = workarea->lockLevel;
		mutex->nm.lockThread = workarea->lockThread == 0 ? -1 : workarea->lockThread;
		mutex->nm.numWaitThreads = (int) mutex->waitingThreads.size();
		Memory::WriteStruct(infoPtr, &mutex->nm);
	}
	return 0;
}
|
|
|
|
|
|
|
|
int sceKernelReferLwMutexStatusByID(SceUID uid, u32 infoPtr)
{
	int error = __KernelReferLwMutexStatus(uid, infoPtr);
	// The result is passed straight through; only the log level differs.
	if (error >= 0)
		DEBUG_LOG(HLE, "sceKernelReferLwMutexStatusByID(%08x, %08x)", uid, infoPtr);
	else
		ERROR_LOG(HLE, "%08x=sceKernelReferLwMutexStatusByID(%08x, %08x)", error, uid, infoPtr);
	return error;
}
|
|
|
|
|
|
|
|
int sceKernelReferLwMutexStatus(u32 workareaPtr, u32 infoPtr)
{
	if (!Memory::IsValidAddress(workareaPtr))
		return -1;

	auto workarea = Memory::GetStruct<NativeLwMutexWorkarea>(workareaPtr);

	int error = __KernelReferLwMutexStatus(workarea->uid, infoPtr);
	// The result is passed straight through; only the log level differs.
	if (error >= 0)
		DEBUG_LOG(HLE, "sceKernelReferLwMutexStatus(%08x, %08x)", workareaPtr, infoPtr);
	else
		ERROR_LOG(HLE, "%08x=sceKernelReferLwMutexStatus(%08x, %08x)", error, workareaPtr, infoPtr);
	return error;
}
|