// Copyright (c) 2012- PPSSPP Project.

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, version 2.0 or later versions.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License 2.0 for more details.

// A copy of the GPL 2.0 should have been included with the program.
// If not, see http://www.gnu.org/licenses/

// Official git repository and contact information can be found at
// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.

#include "Common/Serialize/Serializer.h"
#include "Common/Serialize/SerializeFuncs.h"
#include "Common/Serialize/SerializeMap.h"
#include "Core/HLE/HLE.h"
#include "Core/MIPS/MIPS.h"
#include "Core/CoreTiming.h"
#include "Core/MemMapHelpers.h"
#include "Core/Reporting.h"

#include "Core/HLE/sceKernel.h"
#include "Core/HLE/sceKernelThread.h"
#include "Core/HLE/sceKernelEventFlag.h"
#include "Core/HLE/KernelWaitHelpers.h"

void __KernelEventFlagTimeout(u64 userdata, int cycleslate);

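// Guest-visible event flag state; this is the structure copied out to the game
// by sceKernelReferEventFlagStatus() below.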
struct NativeEventFlag {
	u32_le size;
	char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
	u32_le attr;
	u32_le initPattern;
	u32_le currentPattern;
	s32_le numWaitThreads;
};

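// Bookkeeping for one thread blocked on (or paused from) an event flag wait.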
struct EventFlagTh {
	SceUID threadID;
	u32 bits;
	u32 wait;
	u32 outAddr;
	u64 pausedTimeout;

	bool operator ==(const SceUID &otherThreadID) const {
		return threadID == otherThreadID;
	}
};

class EventFlag : public KernelObject {
public:
	const char *GetName() override { return nef.name; }
	const char *GetTypeName() override { return GetStaticTypeName(); }
	static const char *GetStaticTypeName() { return "EventFlag"; }
	void GetQuickInfo(char *ptr, int size) override {
		snprintf(ptr, size, "init=%08x cur=%08x numwait=%i",
			nef.initPattern,
			nef.currentPattern,
			nef.numWaitThreads);
	}

	static u32 GetMissingErrorCode() {
		return SCE_KERNEL_ERROR_UNKNOWN_EVFID;
	}
	static int GetStaticIDType() { return SCE_KERNEL_TMID_EventFlag; }
	int GetIDType() const override { return SCE_KERNEL_TMID_EventFlag; }

	void DoState(PointerWrap &p) override {
		auto s = p.Section("EventFlag", 1);
		if (!s)
			return;

		Do(p, nef);
		EventFlagTh eft = { 0 };
		Do(p, waitingThreads, eft);
		Do(p, pausedWaits);
	}

	NativeEventFlag nef;
	std::vector<EventFlagTh> waitingThreads;
	// Key is the callback id it was for, or if no callback, the thread id.
	std::map<SceUID, EventFlagTh> pausedWaits;
};


/** Event flag creation attributes */
enum PspEventFlagAttributes {
	/** Allow the event flag to be waited upon by multiple threads */
	PSP_EVENT_WAITMULTIPLE = 0x200
};

/** Event flag wait types */
enum PspEventFlagWaitTypes {
	/** Wait for all bits in the pattern to be set */
	PSP_EVENT_WAITAND = 0x00,
	/** Wait for one or more bits in the pattern to be set */
	PSP_EVENT_WAITOR = 0x01,
	/** Clear the entire pattern when it matches. */
	PSP_EVENT_WAITCLEARALL = 0x10,
	/** Clear the wait pattern when it matches */
	PSP_EVENT_WAITCLEAR = 0x20,

	PSP_EVENT_WAITKNOWN = PSP_EVENT_WAITCLEAR | PSP_EVENT_WAITCLEARALL | PSP_EVENT_WAITOR,
};

static int eventFlagWaitTimer = -1;

void __KernelEventFlagBeginCallback(SceUID threadID, SceUID prevCallbackId);
void __KernelEventFlagEndCallback(SceUID threadID, SceUID prevCallbackId);

void __KernelEventFlagInit() {
	eventFlagWaitTimer = CoreTiming::RegisterEvent("EventFlagTimeout", __KernelEventFlagTimeout);
	__KernelRegisterWaitTypeFuncs(WAITTYPE_EVENTFLAG, __KernelEventFlagBeginCallback, __KernelEventFlagEndCallback);
}

void __KernelEventFlagDoState(PointerWrap &p) {
	auto s = p.Section("sceKernelEventFlag", 1);
	if (!s)
		return;

	Do(p, eventFlagWaitTimer);
	CoreTiming::RestoreRegisterEvent(eventFlagWaitTimer, "EventFlagTimeout", __KernelEventFlagTimeout);
}

KernelObject *__KernelEventFlagObject() {
	// Default object to load from state.
	return new EventFlag;
}

static bool __KernelCheckEventFlagMatches(u32 pattern, u32 bits, u8 wait) {
	// Is this in OR (any bit can match) or AND (all bits must match) mode?
	if (wait & PSP_EVENT_WAITOR) {
		return (bits & pattern) != 0;
	} else {
		return (bits & pattern) == bits;
	}
}

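// If the bits match under the given wait mode, writes the pre-clear pattern to
// outAddr (when valid) and applies the WAITCLEAR/WAITCLEARALL side effects.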
static bool __KernelApplyEventFlagMatch(u32_le *pattern, u32 bits, u8 wait, u32 outAddr) {
	if (__KernelCheckEventFlagMatches(*pattern, bits, wait)) {
		if (Memory::IsValidAddress(outAddr))
			Memory::Write_U32(*pattern, outAddr);

		if (wait & PSP_EVENT_WAITCLEAR)
			*pattern &= ~bits;
		if (wait & PSP_EVENT_WAITCLEARALL)
			*pattern = 0;
		return true;
	}
	return false;
}

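// Wakes th's thread: with result == 0 only if its wait condition now matches,
// otherwise unconditionally with result as the error code. Returns false only
// when the thread should stay waiting.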
static bool __KernelUnlockEventFlagForThread(EventFlag *e, EventFlagTh &th, u32 &error, int result, bool &wokeThreads) {
	if (!HLEKernel::VerifyWait(th.threadID, WAITTYPE_EVENTFLAG, e->GetUID()))
		return true;

	// If result is an error code, we're just letting it go.
	if (result == 0) {
		if (!__KernelApplyEventFlagMatch(&e->nef.currentPattern, th.bits, th.wait, th.outAddr))
			return false;
	} else {
		// Otherwise, we set the current result since we're bailing.
		if (Memory::IsValidAddress(th.outAddr))
			Memory::Write_U32(e->nef.currentPattern, th.outAddr);
	}

	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(th.threadID, error);
	if (timeoutPtr != 0 && eventFlagWaitTimer != -1) {
		// Remove any event for this thread.
		s64 cyclesLeft = CoreTiming::UnscheduleEvent(eventFlagWaitTimer, th.threadID);
		Memory::Write_U32((u32) cyclesToUs(cyclesLeft), timeoutPtr);
	}

	__KernelResumeThreadFromWait(th.threadID, result);
	wokeThreads = true;
	return true;
}

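// Releases every waiting thread with the given result code (e.g. WAIT_CANCEL or
// WAIT_DELETE). Returns true if any thread was actually woken.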
static bool __KernelClearEventFlagThreads(EventFlag *e, int reason) {
	u32 error;
	bool wokeThreads = false;
	std::vector<EventFlagTh>::iterator iter, end;
	for (iter = e->waitingThreads.begin(), end = e->waitingThreads.end(); iter != end; ++iter)
		__KernelUnlockEventFlagForThread(e, *iter, error, reason, wokeThreads);
	e->waitingThreads.clear();

	return wokeThreads;
}

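// Callback suspend/resume hooks for this wait type, registered in __KernelEventFlagInit().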
void __KernelEventFlagBeginCallback(SceUID threadID, SceUID prevCallbackId) {
	auto result = HLEKernel::WaitBeginCallback<EventFlag, WAITTYPE_EVENTFLAG, EventFlagTh>(threadID, prevCallbackId, eventFlagWaitTimer);
	if (result == HLEKernel::WAIT_CB_SUCCESS)
		DEBUG_LOG(SCEKERNEL, "sceKernelWaitEventFlagCB: Suspending lock wait for callback");
	else if (result == HLEKernel::WAIT_CB_BAD_WAIT_DATA)
		ERROR_LOG_REPORT(SCEKERNEL, "sceKernelWaitEventFlagCB: wait not found to pause for callback");
	else
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelWaitEventFlagCB: beginning callback with bad wait id?");
}

void __KernelEventFlagEndCallback(SceUID threadID, SceUID prevCallbackId) {
	auto result = HLEKernel::WaitEndCallback<EventFlag, WAITTYPE_EVENTFLAG, EventFlagTh>(threadID, prevCallbackId, eventFlagWaitTimer, __KernelUnlockEventFlagForThread);
	if (result == HLEKernel::WAIT_CB_RESUMED_WAIT)
		DEBUG_LOG(SCEKERNEL, "sceKernelWaitEventFlagCB: Resuming lock wait from callback");
}

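// For reference, the guest-side calling pattern these HLE functions emulate looks
// roughly like this (PSP SDK style; illustrative sketch only, not code in this file):
//
//   SceUID ev = sceKernelCreateEventFlag("done", 0, 0, NULL);  // no bits set initially
//   sceKernelSetEventFlag(ev, 0x1);                            // producer signals bit 0
//   u32 result;
//   sceKernelWaitEventFlag(ev, 0x1, PSP_EVENT_WAITAND | PSP_EVENT_WAITCLEAR, &result, NULL);
//   sceKernelDeleteEventFlag(ev);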
//SceUID sceKernelCreateEventFlag(const char *name, int attr, int bits, SceKernelEventFlagOptParam *opt);
int sceKernelCreateEventFlag(const char *name, u32 flag_attr, u32 flag_initPattern, u32 optPtr) {
	if (!name) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ERROR, "invalid name");
	}

	// These attributes aren't valid.
	if ((flag_attr & 0x100) != 0 || flag_attr >= 0x300) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_ATTR, "invalid attr parameter: %08x", flag_attr);
	}

	EventFlag *e = new EventFlag();
	SceUID id = kernelObjects.Create(e);

	e->nef.size = sizeof(NativeEventFlag);
	strncpy(e->nef.name, name, KERNELOBJECT_MAX_NAME_LENGTH);
	e->nef.name[KERNELOBJECT_MAX_NAME_LENGTH] = 0;
	e->nef.attr = flag_attr;
	e->nef.initPattern = flag_initPattern;
	e->nef.currentPattern = e->nef.initPattern;
	e->nef.numWaitThreads = 0;

	if (optPtr != 0) {
		u32 size = Memory::Read_U32(optPtr);
		if (size > 4)
			WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateEventFlag(%s) unsupported options parameter, size = %d", name, size);
	}
	if ((flag_attr & ~PSP_EVENT_WAITMULTIPLE) != 0)
		WARN_LOG_REPORT(SCEKERNEL, "sceKernelCreateEventFlag(%s) unsupported attr parameter: %08x", name, flag_attr);

	return hleLogSuccessI(SCEKERNEL, id);
}

u32 sceKernelCancelEventFlag(SceUID uid, u32 pattern, u32 numWaitThreadsPtr) {
	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(uid, error);
	if (e) {
		e->nef.numWaitThreads = (int) e->waitingThreads.size();
		if (Memory::IsValidAddress(numWaitThreadsPtr))
			Memory::Write_U32(e->nef.numWaitThreads, numWaitThreadsPtr);

		e->nef.currentPattern = pattern;

		if (__KernelClearEventFlagThreads(e, SCE_KERNEL_ERROR_WAIT_CANCEL))
			hleReSchedule("event flag canceled");

		hleEatCycles(580);
		return hleLogSuccessI(SCEKERNEL, 0);
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

u32 sceKernelClearEventFlag(SceUID id, u32 bits) {
	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		e->nef.currentPattern &= bits;
		// Note that it's not possible for threads to get woken up by this action.
		hleEatCycles(430);
		return hleLogSuccessI(SCEKERNEL, 0);
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

u32 sceKernelDeleteEventFlag(SceUID uid) {
	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(uid, error);
	if (e) {
		bool wokeThreads = __KernelClearEventFlagThreads(e, SCE_KERNEL_ERROR_WAIT_DELETE);
		if (wokeThreads)
			hleReSchedule("event flag deleted");

		return hleLogSuccessI(SCEKERNEL, kernelObjects.Destroy<EventFlag>(uid));
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

u32 sceKernelSetEventFlag(SceUID id, u32 bitsToSet) {
	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		bool wokeThreads = false;

		e->nef.currentPattern |= bitsToSet;

		for (size_t i = 0; i < e->waitingThreads.size(); ++i) {
			EventFlagTh *t = &e->waitingThreads[i];
			if (__KernelUnlockEventFlagForThread(e, *t, error, 0, wokeThreads)) {
				e->waitingThreads.erase(e->waitingThreads.begin() + i);
				// Try the one that used to be in this place next.
				--i;
			}
		}

		if (wokeThreads)
			hleReSchedule("event flag set");

		hleEatCycles(430);
		return hleLogSuccessI(SCEKERNEL, 0);
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

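// Timer callback fired when a wait's timeout expires: zeroes the caller's timeout
// value and wakes the thread with SCE_KERNEL_ERROR_WAIT_TIMEOUT.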
void __KernelEventFlagTimeout(u64 userdata, int cycleslate) {
	SceUID threadID = (SceUID)userdata;

	// This still needs to set the result pointer from the wait.
	u32 error;
	SceUID flagID = __KernelGetWaitID(threadID, WAITTYPE_EVENTFLAG, error);
	u32 timeoutPtr = __KernelGetWaitTimeoutPtr(threadID, error);
	EventFlag *e = kernelObjects.Get<EventFlag>(flagID, error);
	if (e) {
		if (timeoutPtr != 0)
			Memory::Write_U32(0, timeoutPtr);

		for (size_t i = 0; i < e->waitingThreads.size(); i++) {
			EventFlagTh *t = &e->waitingThreads[i];
			if (t->threadID == threadID) {
				bool wokeThreads;

				// This thread isn't waiting anymore, but we'll remove it from waitingThreads later.
				// The reason is, if it times out, but what it was waiting on is DELETED prior to it
				// actually running, it will get a DELETE result instead of a TIMEOUT.
				// So, we need to remember it or we won't be able to mark it DELETE instead later.
				__KernelUnlockEventFlagForThread(e, *t, error, SCE_KERNEL_ERROR_WAIT_TIMEOUT, wokeThreads);
				break;
			}
		}
	}
}

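// Schedules __KernelEventFlagTimeout() for the current thread if a timeout was requested.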
static void __KernelSetEventFlagTimeout(EventFlag *e, u32 timeoutPtr) {
	if (timeoutPtr == 0 || eventFlagWaitTimer == -1)
		return;

	int micro = (int) Memory::Read_U32(timeoutPtr);

	// This seems like the actual timing of timeouts on hardware.
	if (micro <= 1)
		micro = 25;
	else if (micro <= 209)
		micro = 240;

	// This should call __KernelEventFlagTimeout() later, unless we cancel it.
	CoreTiming::ScheduleEvent(usToCycles(micro), eventFlagWaitTimer, __KernelGetCurThread());
}

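// Blocks the current thread until the bits match (see PspEventFlagWaitTypes), with an
// optional timeout in microseconds read from timeoutPtr. Callbacks do not run while waiting.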
int sceKernelWaitEventFlag(SceUID id, u32 bits, u32 wait, u32 outBitsPtr, u32 timeoutPtr) {
	if ((wait & ~PSP_EVENT_WAITKNOWN) != 0) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_MODE, "invalid mode parameter: %08x", wait);
	}
	// Can't wait on 0, that's guaranteed to wait forever.
	if (bits == 0) {
		return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_ILPAT, "bad pattern");
	}

	if (!__KernelIsDispatchEnabled()) {
		return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_CAN_NOT_WAIT, "dispatch disabled");
	}

	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		EventFlagTh th;
		if (!__KernelApplyEventFlagMatch(&e->nef.currentPattern, bits, wait, outBitsPtr)) {
			// If this thread was left in waitingThreads after a timeout, remove it.
			// Otherwise we might write the outBitsPtr in the wrong place.
			HLEKernel::RemoveWaitingThread(e->waitingThreads, __KernelGetCurThread());

			u32 timeout = 0xFFFFFFFF;
			if (Memory::IsValidAddress(timeoutPtr))
				timeout = Memory::Read_U32(timeoutPtr);

			// Do we allow more than one thread to wait?
			if (e->waitingThreads.size() > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0) {
				return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_MULTI);
			}

			(void)hleLogSuccessI(SCEKERNEL, 0, "waiting");

			// No match - must wait.
			th.threadID = __KernelGetCurThread();
			th.bits = bits;
			th.wait = wait;
			// If < 5ms, sometimes hardware doesn't write this, but it's unpredictable.
			th.outAddr = timeout == 0 ? 0 : outBitsPtr;
			e->waitingThreads.push_back(th);

			__KernelSetEventFlagTimeout(e, timeoutPtr);
			__KernelWaitCurThread(WAITTYPE_EVENTFLAG, id, 0, timeoutPtr, false, "event flag waited");
		} else {
			(void)hleLogSuccessI(SCEKERNEL, 0);
		}

		hleEatCycles(500);
		return 0;
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

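// Same as sceKernelWaitEventFlag(), but callbacks may run while the thread waits.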
int sceKernelWaitEventFlagCB(SceUID id, u32 bits, u32 wait, u32 outBitsPtr, u32 timeoutPtr) {
	if ((wait & ~PSP_EVENT_WAITKNOWN) != 0) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_MODE, "invalid mode parameter: %08x", wait);
	}
	// Can't wait on 0, that's guaranteed to wait forever.
	if (bits == 0) {
		return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_ILPAT, "bad pattern");
	}

	if (!__KernelIsDispatchEnabled()) {
		return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_CAN_NOT_WAIT, "dispatch disabled");
	}

	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		EventFlagTh th;
		// We only check, not apply here. This way the CLEAR/etc. options don't apply yet.
		// If we run callbacks, we will check again after the callbacks complete.
		bool doWait = !__KernelCheckEventFlagMatches(e->nef.currentPattern, bits, wait);
		bool doCallbackWait = false;
		if (__KernelCurHasReadyCallbacks()) {
			doWait = true;
			doCallbackWait = true;
		}

		if (doWait) {
			// If this thread was left in waitingThreads after a timeout, remove it.
			// Otherwise we might write the outBitsPtr in the wrong place.
			HLEKernel::RemoveWaitingThread(e->waitingThreads, __KernelGetCurThread());

			u32 timeout = 0xFFFFFFFF;
			if (Memory::IsValidAddress(timeoutPtr))
				timeout = Memory::Read_U32(timeoutPtr);

			// Do we allow more than one thread to wait?
			if (e->waitingThreads.size() > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0) {
				return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_MULTI);
			}

			(void)hleLogSuccessI(SCEKERNEL, 0, "waiting");

			// No match - must wait.
			th.threadID = __KernelGetCurThread();
			th.bits = bits;
			th.wait = wait;
			// If < 5ms, sometimes hardware doesn't write this, but it's unpredictable.
			th.outAddr = timeout == 0 ? 0 : outBitsPtr;
			e->waitingThreads.push_back(th);

			__KernelSetEventFlagTimeout(e, timeoutPtr);
			if (doCallbackWait)
				__KernelWaitCallbacksCurThread(WAITTYPE_EVENTFLAG, id, 0, timeoutPtr);
			else
				__KernelWaitCurThread(WAITTYPE_EVENTFLAG, id, 0, timeoutPtr, true, "event flag waited");
		} else {
			(void)hleLogSuccessI(SCEKERNEL, 0);
			__KernelApplyEventFlagMatch(&e->nef.currentPattern, bits, wait, outBitsPtr);
			hleCheckCurrentCallbacks();
		}

		hleEatCycles(500);
		return 0;
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

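// Non-blocking variant: returns SCE_KERNEL_ERROR_EVF_COND instead of waiting when the
// bits don't match.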
int sceKernelPollEventFlag(SceUID id, u32 bits, u32 wait, u32 outBitsPtr) {
	if ((wait & ~PSP_EVENT_WAITKNOWN) != 0) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_MODE, "invalid mode parameter: %08x", wait);
	}
	// Poll seems to also fail when CLEAR and CLEARALL are used together, but not wait.
	if ((wait & PSP_EVENT_WAITCLEAR) != 0 && (wait & PSP_EVENT_WAITCLEARALL) != 0) {
		return hleReportWarning(SCEKERNEL, SCE_KERNEL_ERROR_ILLEGAL_MODE, "invalid mode parameter: %08x", wait);
	}
	// Can't wait on 0, it never matches.
	if (bits == 0) {
		return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_ILPAT, "bad pattern");
	}

	hleEatCycles(360);

	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		if (!__KernelApplyEventFlagMatch(&e->nef.currentPattern, bits, wait, outBitsPtr)) {
			if (Memory::IsValidAddress(outBitsPtr))
				Memory::Write_U32(e->nef.currentPattern, outBitsPtr);

			if (e->waitingThreads.size() > 0 && (e->nef.attr & PSP_EVENT_WAITMULTIPLE) == 0) {
				return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_MULTI);
			}

			// No match - return that, this is polling, not waiting.
			return hleLogDebug(SCEKERNEL, SCE_KERNEL_ERROR_EVF_COND);
		} else {
			return hleLogSuccessI(SCEKERNEL, 0);
		}
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}

//int sceKernelReferEventFlagStatus(SceUID event, SceKernelEventFlagInfo *status);
u32 sceKernelReferEventFlagStatus(SceUID id, u32 statusPtr) {
	u32 error;
	EventFlag *e = kernelObjects.Get<EventFlag>(id, error);
	if (e) {
		auto status = PSPPointer<NativeEventFlag>::Create(statusPtr);
		if (!status.IsValid())
			return hleLogWarning(SCEKERNEL, -1, "invalid ptr");

		HLEKernel::CleanupWaitingThreads(WAITTYPE_EVENTFLAG, id, e->waitingThreads);

		e->nef.numWaitThreads = (int) e->waitingThreads.size();
		if (status->size != 0) {
			*status = e->nef;
			status.NotifyWrite("EventFlagStatus");
		}
		return hleLogSuccessI(SCEKERNEL, 0);
	} else {
		return hleLogDebug(SCEKERNEL, error, "invalid event flag");
	}
}