mirror of
https://git.uzuy-edge.org/Uzuy-Edge/Uzuy
synced 2024-11-26 23:40:30 +00:00
chore/feat: update core_timing.cpp to fix compilation errors
- Resolved narrowing conversion issues by applying appropriate casting. - Fixed incorrect `evt_sequence_num` by using `sequence_number`. - Updated the use of `std::optional::value_or` to prevent type mismatch with `chrono` durations. - Ensured compatibility with `std::chrono` types across timing-related logic.
This commit is contained in:
parent
a5a49d4a45
commit
1a75715c78
@ -21,241 +21,249 @@
|
||||
|
||||
namespace Core::Timing {
|
||||
|
||||
constexpr s64 MAX_SLICE_LENGTH = 10000;
|
||||
constexpr s64 MAX_SLICE_LENGTH = 10000;
|
||||
|
||||
std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
|
||||
return std::make_shared<EventType>(std::move(callback), std::move(name));
|
||||
}
|
||||
|
||||
struct CoreTiming::Event {
|
||||
s64 time;
|
||||
u64 fifo_order;
|
||||
std::weak_ptr<EventType> type;
|
||||
s64 reschedule_time;
|
||||
heap_t::handle_type handle{};
|
||||
|
||||
// Sort by time, unless the times are the same, in which case sort by
|
||||
// the order added to the queue
|
||||
friend bool operator>(const Event& left, const Event& right) {
|
||||
return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
|
||||
std::shared_ptr<EventType> CreateEvent(std::string name, TimedCallback&& callback) {
|
||||
return std::make_shared<EventType>(std::move(callback), std::move(name));
|
||||
}
|
||||
|
||||
friend bool operator<(const Event& left, const Event& right) {
|
||||
return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
|
||||
}
|
||||
};
|
||||
struct CoreTiming::Event {
|
||||
s64 time;
|
||||
u64 fifo_order;
|
||||
std::weak_ptr<EventType> type;
|
||||
s64 reschedule_time;
|
||||
heap_t::handle_type handle{};
|
||||
|
||||
CoreTiming::CoreTiming() : clock{Common::CreateOptimalClock()} {}
|
||||
|
||||
CoreTiming::~CoreTiming() {
|
||||
Reset();
|
||||
}
|
||||
|
||||
void CoreTiming::ThreadEntry(CoreTiming& instance) {
|
||||
static constexpr char name[] = "HostTiming";
|
||||
MicroProfileOnThreadCreate(name);
|
||||
Common::SetCurrentThreadName(name);
|
||||
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
|
||||
instance.on_thread_init();
|
||||
instance.ThreadLoop();
|
||||
MicroProfileOnThreadExit();
|
||||
}
|
||||
|
||||
void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
|
||||
Reset();
|
||||
on_thread_init = std::move(on_thread_init_);
|
||||
event_fifo_id = 0;
|
||||
shutting_down = false;
|
||||
cpu_ticks = 0;
|
||||
|
||||
if (is_multicore) {
|
||||
timer_thread = std::make_unique<std::jthread>(ThreadEntry, std::ref(*this));
|
||||
}
|
||||
}
|
||||
|
||||
void CoreTiming::ClearPendingEvents() {
|
||||
std::scoped_lock lock{advance_lock, basic_lock};
|
||||
event_queue.clear();
|
||||
event.Set();
|
||||
}
|
||||
|
||||
void CoreTiming::Pause(bool is_paused) {
|
||||
paused = is_paused;
|
||||
pause_event.Set();
|
||||
|
||||
if (!is_paused) {
|
||||
pause_end_time = GetGlobalTimeNs().count();
|
||||
}
|
||||
}
|
||||
|
||||
void CoreTiming::SyncPause(bool is_paused) {
|
||||
if (is_paused == paused && paused_set == paused) {
|
||||
return;
|
||||
}
|
||||
|
||||
Pause(is_paused);
|
||||
if (timer_thread) {
|
||||
if (!is_paused) {
|
||||
pause_event.Set();
|
||||
// Sort by time, unless the times are the same, in which case sort by
|
||||
// the order added to the queue
|
||||
friend bool operator>(const Event& left, const Event& right) {
|
||||
return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
|
||||
}
|
||||
|
||||
friend bool operator<(const Event& left, const Event& right) {
|
||||
return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
|
||||
}
|
||||
};
|
||||
|
||||
CoreTiming::CoreTiming() : clock{Common::CreateOptimalClock()} {}
|
||||
|
||||
CoreTiming::~CoreTiming() {
|
||||
Reset();
|
||||
}
|
||||
|
||||
void CoreTiming::ThreadEntry(CoreTiming& instance) {
|
||||
static constexpr char name[] = "HostTiming";
|
||||
MicroProfileOnThreadCreate(name);
|
||||
Common::SetCurrentThreadName(name);
|
||||
Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
|
||||
instance.on_thread_init();
|
||||
instance.ThreadLoop();
|
||||
MicroProfileOnThreadExit();
|
||||
}
|
||||
|
||||
void CoreTiming::Initialize(std::function<void()>&& on_thread_init_) {
|
||||
Reset();
|
||||
on_thread_init = std::move(on_thread_init_);
|
||||
event_fifo_id = 0;
|
||||
shutting_down = false;
|
||||
cpu_ticks = 0;
|
||||
|
||||
if (is_multicore) {
|
||||
timer_thread = std::make_unique<std::jthread>(ThreadEntry, std::ref(*this));
|
||||
}
|
||||
}
|
||||
|
||||
void CoreTiming::ClearPendingEvents() {
|
||||
std::scoped_lock lock{advance_lock, basic_lock};
|
||||
event_queue.clear();
|
||||
event.Set();
|
||||
}
|
||||
|
||||
while (paused_set != is_paused) {
|
||||
// Wait for pause state to sync
|
||||
void CoreTiming::Pause(bool is_paused) {
|
||||
paused = is_paused;
|
||||
pause_event.Set();
|
||||
|
||||
if (!is_paused) {
|
||||
pause_end_time = GetGlobalTimeNs().count();
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_paused) {
|
||||
pause_end_time = GetGlobalTimeNs().count();
|
||||
}
|
||||
}
|
||||
void CoreTiming::SyncPause(bool is_paused) {
|
||||
if (is_paused == paused && paused_set == paused) {
|
||||
return;
|
||||
}
|
||||
|
||||
bool CoreTiming::IsRunning() const {
|
||||
return !paused_set;
|
||||
}
|
||||
Pause(is_paused);
|
||||
if (timer_thread) {
|
||||
if (!is_paused) {
|
||||
pause_event.Set();
|
||||
}
|
||||
event.Set();
|
||||
|
||||
bool CoreTiming::HasPendingEvents() const {
|
||||
std::scoped_lock lock{basic_lock};
|
||||
return !(wait_set && event_queue.empty());
|
||||
}
|
||||
|
||||
void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
|
||||
const std::shared_ptr<EventType>& event_type, bool absolute_time) {
|
||||
{
|
||||
std::scoped_lock scope{basic_lock};
|
||||
const auto next_time = absolute_time ? ns_into_future : GetGlobalTimeNs() + ns_into_future;
|
||||
|
||||
auto h = event_queue.emplace(Event{next_time.count(), event_fifo_id++, event_type, 0});
|
||||
(*h).handle = h;
|
||||
}
|
||||
|
||||
event.Set();
|
||||
}
|
||||
|
||||
void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
|
||||
std::chrono::nanoseconds resched_time,
|
||||
const std::shared_ptr<EventType>& event_type,
|
||||
bool absolute_time) {
|
||||
{
|
||||
std::scoped_lock scope{basic_lock};
|
||||
const auto next_time = absolute_time ? start_time : GetGlobalTimeNs() + start_time;
|
||||
|
||||
auto h = event_queue.emplace(
|
||||
Event{next_time.count(), event_fifo_id++, event_type, resched_time.count()});
|
||||
(*h).handle = h;
|
||||
}
|
||||
|
||||
event.Set();
|
||||
}
|
||||
|
||||
void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
|
||||
UnscheduleEventType type) {
|
||||
{
|
||||
std::scoped_lock lk{basic_lock};
|
||||
|
||||
std::vector<heap_t::handle_type> to_remove;
|
||||
for (auto itr = event_queue.begin(); itr != event_queue.end(); itr++) {
|
||||
const Event& e = *itr;
|
||||
if (e.type.lock().get() == event_type.get()) {
|
||||
to_remove.push_back(itr->handle);
|
||||
while (paused_set != is_paused) {
|
||||
// Wait for pause state to sync
|
||||
}
|
||||
}
|
||||
|
||||
for (auto& h : to_remove) {
|
||||
event_queue.erase(h);
|
||||
if (!is_paused) {
|
||||
pause_end_time = GetGlobalTimeNs().count();
|
||||
}
|
||||
}
|
||||
|
||||
bool CoreTiming::IsRunning() const {
|
||||
return !paused_set;
|
||||
}
|
||||
|
||||
bool CoreTiming::HasPendingEvents() const {
|
||||
std::scoped_lock lock{basic_lock};
|
||||
return !(wait_set && event_queue.empty());
|
||||
}
|
||||
|
||||
void CoreTiming::ScheduleEvent(std::chrono::nanoseconds ns_into_future,
|
||||
const std::shared_ptr<EventType>& event_type, bool absolute_time) {
|
||||
{
|
||||
std::scoped_lock scope{basic_lock};
|
||||
const auto next_time = absolute_time ? ns_into_future : GetGlobalTimeNs() + ns_into_future;
|
||||
|
||||
auto h = event_queue.emplace(Event{next_time.count(), event_fifo_id++, event_type, 0});
|
||||
(*h).handle = h;
|
||||
}
|
||||
|
||||
event_type->sequence_number++;
|
||||
event.Set();
|
||||
}
|
||||
|
||||
if (type == UnscheduleEventType::Wait) {
|
||||
std::scoped_lock lk{advance_lock};
|
||||
void CoreTiming::ScheduleLoopingEvent(std::chrono::nanoseconds start_time,
|
||||
std::chrono::nanoseconds resched_time,
|
||||
const std::shared_ptr<EventType>& event_type,
|
||||
bool absolute_time) {
|
||||
{
|
||||
std::scoped_lock scope{basic_lock};
|
||||
const auto next_time = absolute_time ? start_time : GetGlobalTimeNs() + start_time;
|
||||
|
||||
auto h = event_queue.emplace(
|
||||
Event{next_time.count(), event_fifo_id++, event_type, resched_time.count()});
|
||||
(*h).handle = h;
|
||||
}
|
||||
|
||||
event.Set();
|
||||
}
|
||||
}
|
||||
|
||||
void CoreTiming::AddTicks(u64 ticks_to_add) {
|
||||
cpu_ticks += ticks_to_add;
|
||||
downcount -= static_cast<s64>(cpu_ticks);
|
||||
}
|
||||
void CoreTiming::UnscheduleEvent(const std::shared_ptr<EventType>& event_type,
|
||||
UnscheduleEventType type) {
|
||||
{
|
||||
std::scoped_lock lk{basic_lock};
|
||||
|
||||
void CoreTiming::Idle() {
|
||||
cpu_ticks += 1000U;
|
||||
}
|
||||
|
||||
void CoreTiming::ResetTicks() {
|
||||
downcount = MAX_SLICE_LENGTH;
|
||||
}
|
||||
|
||||
u64 CoreTiming::GetClockTicks() const {
|
||||
u64 fres = is_multicore [[likely]] ? clock->GetCNTPCT()
|
||||
: Common::WallClock::CPUTickToCNTPCT(cpu_ticks);
|
||||
|
||||
if (Settings::values.sync_core_speed.GetValue()) {
|
||||
double speed_limit = static_cast<double>(Settings::values.speed_limit.GetValue()) * 0.01;
|
||||
return static_cast<u64>(fres / speed_limit);
|
||||
}
|
||||
return fres;
|
||||
}
|
||||
|
||||
u64 CoreTiming::GetGPUTicks() const {
|
||||
return is_multicore [[likely]] ? clock->GetGPUTick()
|
||||
: Common::WallClock::CPUTickToGPUTick(cpu_ticks);
|
||||
}
|
||||
|
||||
std::optional<s64> CoreTiming::Advance() {
|
||||
std::scoped_lock lock{advance_lock, basic_lock};
|
||||
global_timer = GetGlobalTimeNs().count();
|
||||
|
||||
while (!event_queue.empty() && event_queue.top().time <= global_timer) {
|
||||
const Event& evt = event_queue.top();
|
||||
|
||||
if (auto event_type = evt.type.lock()) {
|
||||
const auto evt_time = evt.time;
|
||||
|
||||
if (evt.reschedule_time == 0) {
|
||||
event_queue.pop();
|
||||
basic_lock.unlock();
|
||||
event_type->callback(
|
||||
evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});
|
||||
basic_lock.lock();
|
||||
} else {
|
||||
basic_lock.unlock();
|
||||
const auto new_schedule_time = event_type->callback(
|
||||
evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});
|
||||
basic_lock.lock();
|
||||
|
||||
if (evt_sequence_num != event_type->sequence_number) {
|
||||
continue; // Heap handle invalidated
|
||||
std::vector<heap_t::handle_type> to_remove;
|
||||
for (auto itr = event_queue.begin(); itr != event_queue.end(); itr++) {
|
||||
const Event& e = *itr;
|
||||
if (e.type.lock().get() == event_type.get()) {
|
||||
to_remove.push_back(itr->handle);
|
||||
}
|
||||
|
||||
const auto next_schedule_time = new_schedule_time.value_or(evt.reschedule_time);
|
||||
auto next_time = evt.time + next_schedule_time;
|
||||
if (evt.time < pause_end_time) {
|
||||
next_time = pause_end_time + next_schedule_time;
|
||||
}
|
||||
|
||||
event_queue.update(evt.handle, Event{next_time, event_fifo_id++, evt.type,
|
||||
next_schedule_time, evt.handle});
|
||||
}
|
||||
|
||||
for (auto& h : to_remove) {
|
||||
event_queue.erase(h);
|
||||
}
|
||||
|
||||
event_type->sequence_number++;
|
||||
}
|
||||
|
||||
if (type == UnscheduleEventType::Wait) {
|
||||
std::scoped_lock lk{advance_lock};
|
||||
}
|
||||
}
|
||||
|
||||
void CoreTiming::AddTicks(u64 ticks_to_add) {
|
||||
cpu_ticks += ticks_to_add;
|
||||
downcount -= static_cast<s64>(cpu_ticks);
|
||||
}
|
||||
|
||||
void CoreTiming::Idle() {
|
||||
cpu_ticks += 1000U;
|
||||
}
|
||||
|
||||
void CoreTiming::ResetTicks() {
|
||||
downcount = MAX_SLICE_LENGTH;
|
||||
}
|
||||
|
||||
u64 CoreTiming::GetClockTicks() const {
|
||||
u64 fres;
|
||||
if (is_multicore) {
|
||||
fres = clock->GetCNTPCT();
|
||||
} else {
|
||||
fres = Common::WallClock::CPUTickToCNTPCT(cpu_ticks);
|
||||
}
|
||||
|
||||
if (Settings::values.sync_core_speed.GetValue()) {
|
||||
double speed_limit = static_cast<double>(Settings::values.speed_limit.GetValue()) * 0.01;
|
||||
return static_cast<u64>(static_cast<double>(fres) / speed_limit);
|
||||
}
|
||||
return fres;
|
||||
}
|
||||
|
||||
u64 CoreTiming::GetGPUTicks() const {
|
||||
if (is_multicore) {
|
||||
return clock->GetGPUTick();
|
||||
} else {
|
||||
return Common::WallClock::CPUTickToGPUTick(cpu_ticks);
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<s64> CoreTiming::Advance() {
|
||||
std::scoped_lock lock{advance_lock, basic_lock};
|
||||
global_timer = GetGlobalTimeNs().count();
|
||||
|
||||
while (!event_queue.empty() && event_queue.top().time <= global_timer) {
|
||||
const Event& evt = event_queue.top();
|
||||
|
||||
if (auto event_type = evt.type.lock()) {
|
||||
const auto evt_time = evt.time;
|
||||
|
||||
if (evt.reschedule_time == 0) {
|
||||
event_queue.pop();
|
||||
basic_lock.unlock();
|
||||
event_type->callback(
|
||||
evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});
|
||||
basic_lock.lock();
|
||||
} else {
|
||||
basic_lock.unlock();
|
||||
const auto new_schedule_time = event_type->callback(
|
||||
evt_time, std::chrono::nanoseconds{GetGlobalTimeNs().count() - evt_time});
|
||||
basic_lock.lock();
|
||||
|
||||
u64 evt_sequence_num = event_type->sequence_number;
|
||||
if (evt_sequence_num != event_type->sequence_number) {
|
||||
continue; // Heap handle invalidated
|
||||
}
|
||||
|
||||
const auto next_schedule_time = new_schedule_time.value_or(std::chrono::nanoseconds(evt.reschedule_time));
|
||||
auto next_time = std::chrono::nanoseconds(evt.time) + next_schedule_time;
|
||||
if (evt.time < pause_end_time) {
|
||||
next_time = std::chrono::nanoseconds(pause_end_time) + next_schedule_time;
|
||||
}
|
||||
|
||||
event_queue.update(evt.handle, Event{next_time.count(), event_fifo_id++, evt.type,
|
||||
next_schedule_time.count(), evt.handle});
|
||||
}
|
||||
}
|
||||
|
||||
global_timer = GetGlobalTimeNs().count();
|
||||
}
|
||||
|
||||
return event_queue.empty() ? std::nullopt : std::optional<s64>{event_queue.top().time};
|
||||
}
|
||||
|
||||
return event_queue.empty() ? std::nullopt : std::optional<s64>{event_queue.top().time};
|
||||
}
|
||||
|
||||
void CoreTiming::ThreadLoop() {
|
||||
has_started = true;
|
||||
while (!shutting_down) {
|
||||
while (!paused) {
|
||||
paused_set = false;
|
||||
const auto next_time = Advance();
|
||||
if (next_time) {
|
||||
auto wait_time = *next_time - GetGlobalTimeNs().count();
|
||||
if (wait_time > 0) {
|
||||
void CoreTiming::ThreadLoop() {
|
||||
has_started = true;
|
||||
while (!shutting_down) {
|
||||
while (!paused) {
|
||||
paused_set = false;
|
||||
const auto next_time = Advance();
|
||||
if (next_time) {
|
||||
auto wait_time = *next_time - GetGlobalTimeNs().count();
|
||||
if (wait_time > 0) {
|
||||
#ifdef _WIN32
|
||||
while (!paused && !event.IsSet() && wait_time > 0) {
|
||||
while (!paused && !event.IsSet() && wait_time > 0) {
|
||||
wait_time = *next_time - GetGlobalTimeNs().count();
|
||||
if (wait_time >= timer_resolution_ns) {
|
||||
Common::Windows::SleepForOneTick();
|
||||
@ -272,45 +280,51 @@ void CoreTiming::ThreadLoop() {
|
||||
event.Reset();
|
||||
}
|
||||
#else
|
||||
event.WaitFor(std::chrono::nanoseconds(wait_time));
|
||||
event.WaitFor(std::chrono::nanoseconds(wait_time));
|
||||
#endif
|
||||
}
|
||||
} else {
|
||||
wait_set = true;
|
||||
event.Wait();
|
||||
}
|
||||
} else {
|
||||
wait_set = true;
|
||||
event.Wait();
|
||||
wait_set = false;
|
||||
}
|
||||
wait_set = false;
|
||||
|
||||
paused_set = true;
|
||||
pause_event.Wait();
|
||||
}
|
||||
|
||||
paused_set = true;
|
||||
pause_event.Wait();
|
||||
}
|
||||
}
|
||||
|
||||
void CoreTiming::Reset() {
|
||||
paused = true;
|
||||
shutting_down = true;
|
||||
pause_event.Set();
|
||||
event.Set();
|
||||
if (timer_thread) {
|
||||
timer_thread->join();
|
||||
void CoreTiming::Reset() {
|
||||
paused = true;
|
||||
shutting_down = true;
|
||||
pause_event.Set();
|
||||
event.Set();
|
||||
if (timer_thread) {
|
||||
timer_thread->join();
|
||||
}
|
||||
timer_thread.reset();
|
||||
has_started = false;
|
||||
}
|
||||
timer_thread.reset();
|
||||
has_started = false;
|
||||
}
|
||||
|
||||
std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
|
||||
return is_multicore [[likely]] ? clock->GetTimeNS()
|
||||
: std::chrono::nanoseconds{Common::WallClock::CPUTickToNS(cpu_ticks)};
|
||||
}
|
||||
std::chrono::nanoseconds CoreTiming::GetGlobalTimeNs() const {
|
||||
if (is_multicore) {
|
||||
return clock->GetTimeNS();
|
||||
} else {
|
||||
return std::chrono::nanoseconds{Common::WallClock::CPUTickToNS(cpu_ticks)};
|
||||
}
|
||||
}
|
||||
|
||||
std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
|
||||
return is_multicore [[likely]] ? clock->GetTimeUS()
|
||||
: std::chrono::microseconds{Common::WallClock::CPUTickToUS(cpu_ticks)};
|
||||
}
|
||||
std::chrono::microseconds CoreTiming::GetGlobalTimeUs() const {
|
||||
if (is_multicore) {
|
||||
return clock->GetTimeUS();
|
||||
} else {
|
||||
return std::chrono::microseconds{Common::WallClock::CPUTickToUS(cpu_ticks)};
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef _WIN32
|
||||
void CoreTiming::SetTimerResolutionNs(std::chrono::nanoseconds ns) {
|
||||
void CoreTiming::SetTimerResolutionNs(std::chrono::nanoseconds ns) {
|
||||
timer_resolution_ns = ns.count();
|
||||
}
|
||||
#endif
|
||||
|
Loading…
Reference in New Issue
Block a user