Saves user RSP on syscall (#1072)

Putta Khunchalee 2024-10-27 17:34:33 +07:00 committed by GitHub
parent 10c0440d15
commit 939df1891b
14 changed files with 232 additions and 184 deletions

View File

@@ -1,25 +1,26 @@
use super::Base;
use crate::proc::Thread;
/// Extended [Context](super::Context) for AArch64.
/// Extended [Base] for AArch64.
#[repr(C)]
pub struct Context {
base: super::Context, // Must be first field.
pub(super) struct Context {
base: Base, // Must be first field.
}
impl Context {
pub fn new(base: super::Context) -> Self {
pub fn new(base: Base) -> Self {
Self { base }
}
}
pub unsafe fn activate(_: *mut Context) {
todo!();
}
pub unsafe fn activate(&mut self) {
todo!();
}
pub unsafe fn load_fixed_ptr<const O: usize, T>() -> *const T {
todo!()
}
pub unsafe fn load_fixed_ptr<const O: usize, T>() -> *const T {
todo!()
}
pub unsafe fn load_usize<const O: usize>() -> usize {
todo!()
pub unsafe fn load_usize<const O: usize>() -> usize {
todo!()
}
}

View File

@@ -1,13 +1,13 @@
use super::{Context, PinnedContext};
use super::{pin_cpu, PinnedContext};
use crate::config::config;
use alloc::vec::Vec;
use core::ops::Deref;
/// Encapsulates per-CPU value.
///
/// In theory you can use `RefCell` to have a mutable access to the value but it is prone to panic
/// because the CPU is allowed to switch to the other thread, which will panic if the new thread
/// attemp to lock the same `RefCell`.
/// Use `RefCell` if you need interior mutability, but that will make the code unsafe to call from
/// any interrupt handler. You can't use a mutex here because once the thread is pinned to a CPU it
/// cannot go to sleep.
pub struct CpuLocal<T>(Vec<T>);
impl<T> CpuLocal<T> {
@@ -22,14 +22,19 @@ impl<T> CpuLocal<T> {
Self(vec)
}
/// The calling thread cannot go to sleep until the returned [`CpuLock`] is dropped. Attempting to
/// call any function that can put the thread to sleep will panic.
pub fn lock(&self) -> CpuLock<T> {
let pin = Context::pin();
let pin = pin_cpu();
let val = &self.0[unsafe { pin.cpu() }];
CpuLock { val, pin }
}
}
unsafe impl<T: Send> Send for CpuLocal<T> {}
unsafe impl<T: Send> Sync for CpuLocal<T> {}
/// RAII struct to access per-CPU value in [`CpuLocal`].
pub struct CpuLock<'a, T> {
val: &'a T,

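A minimal usage sketch for this API (hypothetical code, not part of the diff — the `bump` helper and the counter type are made up): `lock()` pins the calling thread, the `RefCell` provides interior mutability, and dropping the guard unpins the thread.

    // Hypothetical sketch: a per-CPU counter behind CpuLocal + RefCell.
    use core::cell::RefCell;

    fn bump(counters: &CpuLocal<RefCell<u64>>) {
        let guard = counters.lock();  // pins the calling thread to this CPU
        *guard.borrow_mut() += 1;     // exclusive on this CPU; no atomics needed
    } // guard dropped here: the thread may migrate and sleep again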
View File

@@ -1,12 +1,13 @@
pub use self::arc::*;
pub use self::arch::*;
pub use self::local::*;
use self::arch::{load_fixed_ptr, load_usize};
use crate::proc::{ProcMgr, Thread};
use alloc::rc::Rc;
use alloc::sync::Arc;
use core::marker::PhantomData;
use core::mem::offset_of;
use core::sync::atomic::Ordering;
mod arc;
#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
@@ -27,22 +28,58 @@ mod local;
pub unsafe fn run_with_context(cpu: usize, td: Arc<Thread>, pmgr: Arc<ProcMgr>, f: fn() -> !) -> ! {
// We use a different mechanism here. The PS4 puts all of the pcpu data at a global level, but we
// put it on each CPU's stack instead.
let mut cx = self::arch::Context::new(Context {
let mut cx = Context::new(Base {
cpu,
thread: Arc::into_raw(td),
pmgr: Arc::into_raw(pmgr),
});
self::arch::activate(&mut cx);
cx.activate();
f();
}
/// # Interrupt safety
/// This function is interrupt safe.
pub fn current_thread() -> BorrowedArc<Thread> {
// It does not matter if we are on a different CPU after we load the Context::thread because it
// is going to be the same one since it represents the current thread.
unsafe { BorrowedArc::new(Context::load_fixed_ptr::<{ offset_of!(Base, thread) }, _>()) }
}
/// # Interrupt safety
/// This function is interrupt safe.
pub fn current_procmgr() -> BorrowedArc<ProcMgr> {
// It does not matter if we are on a different CPU after we load the Context::pmgr because it is
// always the same for all CPUs.
unsafe { BorrowedArc::new(Context::load_fixed_ptr::<{ offset_of!(Base, pmgr) }, _>()) }
}
/// Pin the calling thread to one CPU.
///
/// This thread will never switch to a different CPU until the returned [`PinnedContext`] is
/// dropped, and it is not allowed to sleep during that time.
///
/// See `critical_enter` and `critical_exit` on the PS4 for a reference. Beware that our
/// implementation is a bit different. The PS4 **allows the thread to sleep but we don't**.
pub fn pin_cpu() -> PinnedContext {
let td = current_thread();
// Prevent all operations after this point from being executed before this line. See
// https://github.com/rust-lang/rust/issues/130655#issuecomment-2365189317 for the explanation.
unsafe { td.active_pins().fetch_add(1, Ordering::Acquire) };
PinnedContext {
td,
phantom: PhantomData,
}
}
/// Implementation of `pcpu` structure.
///
/// Access to this structure must be done by **atomically reading or writing its fields directly**.
/// It is not safe to keep a temporary pointer or reference to this struct or its fields because the CPU
/// might get interupted, which mean it is possible for the next instruction to get executed on
/// a different CPU if the interupt cause the CPU to switch the task.
/// might get interrupted, which means it is possible for the next instruction to be executed on
/// a different CPU if the interrupt causes the CPU to switch tasks.
///
/// The activation of this struct is a minimum requirement for a new CPU to call most of the other
/// functions. The new CPU should call [`run_with_context()`] as soon as possible. We don't make the
@@ -56,47 +93,13 @@ pub unsafe fn run_with_context(cpu: usize, td: Arc<Thread>, pmgr: Arc<ProcMgr>,
/// main CPU** because the only kernel functions it can call into are either the stage 1 allocator
/// or the panic handler, neither of which requires a CPU context.
#[repr(C)]
pub struct Context {
struct Base {
cpu: usize, // pc_cpuid
thread: *const Thread, // pc_curthread
pmgr: *const ProcMgr,
}
impl Context {
/// # Interupt safety
/// This function is interupt safe.
pub fn thread() -> BorrowedArc<Thread> {
// It does not matter if we are on a different CPU after we load the Context::thread because
// it is going to be the same one since it represent the current thread.
unsafe { BorrowedArc::new(load_fixed_ptr::<{ offset_of!(Self, thread) }, _>()) }
}
pub fn procs() -> BorrowedArc<ProcMgr> {
// It does not matter if we are on a different CPU after we load the Context::pmgr because
// it is always the same for all CPU.
unsafe { BorrowedArc::new(load_fixed_ptr::<{ offset_of!(Self, pmgr) }, _>()) }
}
/// Pin the calling thread to one CPU.
///
/// This thread will never switch to a different CPU until the returned [`PinnedContext`] is
/// dropped and it is not allowed to sleep.
///
/// See `critical_enter` and `critical_exit` on the PS4 for a reference. Beware that our
/// implementation a bit different. The PS4 **allow the thread to sleep but we don't**.
pub fn pin() -> PinnedContext {
let td = Self::thread();
unsafe { *td.critical_sections_mut() += 1 };
PinnedContext {
td,
phantom: PhantomData,
}
}
}
impl Drop for Context {
impl Drop for Base {
fn drop(&mut self) {
panic!("dropping Context can cause a bug so it is not supported");
}
@@ -107,7 +110,7 @@ impl Drop for Context {
/// This struct must not implement [`Send`] and [`Sync`].
pub struct PinnedContext {
td: BorrowedArc<Thread>,
phantom: PhantomData<Rc<()>>, // For !Send and !Sync.
phantom: PhantomData<Rc<()>>, // Make sure we are !Send and !Sync.
}
impl PinnedContext {
@@ -117,13 +120,15 @@ impl PinnedContext {
/// Anything derived from the returned value will become invalid when this [`PinnedContext`] is
/// dropped.
pub unsafe fn cpu(&self) -> usize {
load_usize::<{ offset_of!(Context, cpu) }>()
Context::load_usize::<{ offset_of!(Base, cpu) }>()
}
}
impl Drop for PinnedContext {
fn drop(&mut self) {
unsafe { *self.td.critical_sections_mut() -= 1 };
// Prevent all operations before this point from being executed after this line. See
// https://github.com/rust-lang/rust/issues/130655#issuecomment-2365189317 for the explanation.
unsafe { self.td.active_pins().fetch_sub(1, Ordering::Release) };
// TODO: Implement td_owepreempt.
}

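A sketch of how the new free functions compose (the `with_cpu_id` helper is hypothetical; only `pin_cpu`, `PinnedContext::cpu`, and the orderings come from the diff): the `Acquire` increment and `Release` decrement fence the pinned region so its operations cannot be reordered outside the pin's lifetime.

    // Hypothetical helper around pin_cpu().
    fn with_cpu_id(f: impl FnOnce(usize)) {
        let pin = pin_cpu();       // active_pins.fetch_add(1, Acquire)
        f(unsafe { pin.cpu() });   // the index is only meaningful while pinned
    }                              // drop: active_pins.fetch_sub(1, Release)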
View File

@@ -1,52 +1,64 @@
use super::Base;
use crate::arch::wrmsr;
use core::arch::asm;
use core::mem::offset_of;
/// Extended [Context](super::Context) for x86-64.
pub const fn current_user_rsp_offset() -> usize {
offset_of!(Context, user_rsp)
}
/// Extended [Base] for x86-64.
#[repr(C)]
pub struct Context {
base: super::Context, // Must be first field.
pub(super) struct Context {
base: Base, // Must be first field.
user_rsp: usize, // pc_scratch_rsp
}
impl Context {
pub fn new(base: super::Context) -> Self {
Self { base }
pub fn new(base: Base) -> Self {
Self { base, user_rsp: 0 }
}
/// Set the kernel `GS` segment register to this context.
///
/// At a glance this may look incorrect since `0xc0000102` is `KERNEL_GS_BASE` according to the
/// docs. The problem is that the CPU always uses the value from `0xc0000101` regardless of the
/// current privilege level. That means `KERNEL_GS_BASE` only holds the kernel GS base while the
/// CPU is in user space.
///
/// This also sets user-mode `FS` and `GS` to null.
pub unsafe fn activate(&mut self) {
// Set GS for kernel mode.
wrmsr(0xc0000101, self as *mut Self as usize);
// Clear FS and GS for user mode.
wrmsr(0xc0000100, 0);
wrmsr(0xc0000102, 0);
}
pub unsafe fn load_fixed_ptr<const O: usize, T>() -> *const T {
let mut v;
asm!(
"mov {out}, gs:[{off}]",
off = const O,
out = out(reg) v,
options(pure, nomem, preserves_flags, nostack)
);
v
}
pub unsafe fn load_usize<const O: usize>() -> usize {
let mut v;
asm!(
"mov {out}, gs:[{off}]",
off = const O,
out = out(reg) v,
options(preserves_flags, nostack)
);
v
}
}
/// Set kernel `GS` segment register to `cx`.
///
/// This also set user-mode `FS` and `GS` to null.
pub unsafe fn activate(cx: *mut Context) {
// Set GS for kernel mode.
wrmsr(0xc0000101, cx as usize);
// Clear FS and GS for user mode.
wrmsr(0xc0000100, 0);
wrmsr(0xc0000102, 0);
}
pub unsafe fn load_fixed_ptr<const O: usize, T>() -> *const T {
let mut v;
asm!(
"mov {out}, gs:[{off}]",
off = const O,
out = out(reg) v,
options(pure, nomem, preserves_flags, nostack)
);
v
}
pub unsafe fn load_usize<const O: usize>() -> usize {
let mut v;
asm!(
"mov {out}, gs:[{off}]",
off = const O,
out = out(reg) v,
options(preserves_flags, nostack)
);
v
}

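Since `base` must stay the first field, a compile-time guard could make the `// Must be first field.` comment enforceable (a hypothetical addition, not in the commit): with `#[repr(C)]` the offset of `base` is 0, so offsets computed via `offset_of!(Base, ...)` or `current_user_rsp_offset()` remain valid against the `Context` pointer stored in `GS`.

    // Hypothetical compile-time check for the first-field invariant.
    const _: () = assert!(core::mem::offset_of!(Context, base) == 0);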
View File

@@ -1,5 +1,5 @@
use super::MTX_UNOWNED;
use crate::context::{BorrowedArc, Context};
use crate::context::{current_thread, BorrowedArc};
use alloc::rc::Rc;
use alloc::sync::Arc;
use core::cell::UnsafeCell;
@@ -87,20 +87,20 @@ pub struct GutexGroup {
}
impl GutexGroup {
/// # Context safety
/// This function does not require a CPU context.
pub fn new() -> Arc<Self> {
// This function is not allowed to access the CPU context due to it can be called before the
// context has been activated.
Arc::new(Self {
owning: AtomicUsize::new(MTX_UNOWNED),
active: UnsafeCell::new(0),
})
}
pub fn spawn<T>(self: &Arc<Self>, value: T) -> Gutex<T> {
// This function is not allowed to access the CPU context due to it can be called before the
// context has been activated.
/// # Context safety
/// This function does not require a CPU context.
pub fn spawn<T>(self: Arc<Self>, value: T) -> Gutex<T> {
Gutex {
group: self.clone(),
group: self,
active: UnsafeCell::new(0),
value: UnsafeCell::new(value),
}
@@ -109,7 +109,7 @@ impl GutexGroup {
#[inline(never)]
fn lock(&self) -> GroupGuard {
// Check if the calling thread already owns the lock.
let td = Context::thread();
let td = current_thread();
let id = BorrowedArc::as_ptr(&td) as usize;
if id == self.owning.load(Ordering::Relaxed) {

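The `spawn` signature changed from `&Arc<Self>` to `Arc<Self>`, so callers now hand over the `Arc` instead of having `spawn` clone it internally. A hypothetical usage sketch (values made up):

    // Hypothetical usage of the by-value spawn().
    fn make_gutexes() -> (Gutex<u32>, Gutex<u32>) {
        let gg = GutexGroup::new();
        let first = gg.clone().spawn(0); // clone for all but the last
        let second = gg.spawn(42);       // consumes the Arc
        (first, second)
    }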
View File

@@ -1,5 +1,5 @@
use super::MTX_UNOWNED;
use crate::context::{BorrowedArc, Context};
use crate::context::{current_thread, BorrowedArc};
use alloc::rc::Rc;
use core::cell::UnsafeCell;
use core::marker::PhantomData;
@@ -27,11 +27,11 @@ impl<T> Mutex<T> {
/// See `_mtx_lock_flags` on the PS4 for a reference.
pub fn lock(&self) -> MutexGuard<T> {
// Disallow locking in an interupt handler.
let td = Context::thread();
// Check if the current thread can sleep.
let td = current_thread();
if td.active_interrupts() != 0 {
panic!("locking a mutex in an interupt handler is not supported");
if !td.can_sleep() {
panic!("locking a mutex in a non-sleeping context is not supported");
}
// Take ownership.
@@ -48,7 +48,7 @@ impl<T> Mutex<T> {
todo!()
}
td.active_mutexes().fetch_add(1, Ordering::Relaxed);
*td.active_mutexes_mut() += 1;
MutexGuard {
data: self.data.get(),
@@ -62,9 +62,9 @@ impl<T> Mutex<T> {
/// # Safety
/// Must be called by the thread that own `lock`.
unsafe fn unlock(lock: &AtomicUsize) {
let td = Context::thread();
let td = current_thread();
td.active_mutexes().fetch_sub(1, Ordering::Relaxed);
*td.active_mutexes_mut() -= 1;
// TODO: There is a check for (m->lock_object).lo_data == 0 on the PS4.
if lock

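The check widened from "is an interrupt running" to "can this thread sleep at all", which also covers threads pinned by `pin_cpu()`. A hypothetical illustration:

    // Hypothetical illustration: a pinned thread must never block.
    fn pinned_section() {
        let _pin = pin_cpu();
        // Any mutex.lock() here now panics early ("non-sleeping
        // context") instead of potentially deadlocking the CPU.
    }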
View File

@@ -1,7 +1,7 @@
#![no_std]
#![cfg_attr(not(test), no_main)]
use crate::context::Context;
use crate::context::current_procmgr;
use crate::malloc::KernelHeap;
use crate::proc::{ProcMgr, Thread};
use crate::sched::sleep;
@@ -71,9 +71,10 @@ fn main() -> ! {
// See the scheduler() function on the PS4 for a reference. Actually it should be called swapper
// instead.
// TODO: Subscribe to "system_suspend_phase2_pre_sync" and "system_resume_phase2" event.
let procs = current_procmgr();
loop {
// TODO: Implement a call to vm_page_count_min().
let procs = Context::procs();
let procs = procs.list();
if procs.len() == 0 {

View File

@@ -1,12 +1,12 @@
use crate::config::{config, PAGE_SIZE};
use crate::context::Context;
use crate::config::PAGE_SIZE;
use crate::context::{current_thread, CpuLocal};
use crate::uma::UmaZone;
use alloc::string::ToString;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::alloc::Layout;
use core::cell::RefCell;
use core::num::NonZero;
use core::sync::atomic::{AtomicU64, Ordering};
/// Stage 2 kernel heap.
///
@@ -14,7 +14,7 @@ use core::sync::atomic::{AtomicU64, Ordering};
/// `malloc_type` and `malloc_type_internal` structure.
pub struct Stage2 {
zones: [Vec<Arc<UmaZone>>; (usize::BITS - 1) as usize], // kmemsize + kmemzones
stats: Vec<Stats>, // mti_stats
stats: CpuLocal<RefCell<Stats>>, // mti_stats
}
impl Stage2 {
@@ -57,14 +57,10 @@ impl Stage2 {
zones
});
// TODO: Is there a better way than this?
let mut stats = Vec::with_capacity(config().max_cpu.get());
for _ in 0..config().max_cpu.get() {
stats.push(Stats::default());
Self {
zones,
stats: CpuLocal::new(|_| RefCell::default()),
}
Self { zones, stats }
}
/// Returns null on failure.
@@ -75,10 +71,10 @@ impl Stage2 {
/// `layout` must be nonzero.
pub unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// Our implementation implies M_WAITOK.
let td = Context::thread();
let td = current_thread();
if td.active_interrupts() != 0 {
panic!("heap allocation in an interrupt handler is not supported");
if !td.can_sleep() {
panic!("heap allocation in a non-sleeping context is not supported");
}
// Determine how to allocate.
@@ -99,15 +95,16 @@ impl Stage2 {
let mem = zone.alloc();
// Update stats.
let cx = Context::pin();
let stats = &self.stats[cx.cpu()];
let stats = self.stats.lock();
let mut stats = stats.borrow_mut();
let size = if mem.is_null() { 0 } else { zone.size().get() };
if size != 0 {
stats
stats.alloc_bytes = stats
.alloc_bytes
.fetch_add(size.try_into().unwrap(), Ordering::Relaxed);
stats.alloc_count.fetch_add(1, Ordering::Relaxed);
.checked_add(size.try_into().unwrap())
.unwrap();
stats.alloc_count += 1;
}
// TODO: How to update mts_size here since our zone table is also indexed by alignment?
@@ -128,6 +125,6 @@ impl Stage2 {
/// Implementation of `malloc_type_stats` structure.
#[derive(Default)]
struct Stats {
alloc_bytes: AtomicU64, // mts_memalloced
alloc_count: AtomicU64, // mts_numallocs
alloc_bytes: u64, // mts_memalloced
alloc_count: u64, // mts_numallocs
}

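The stats fields could drop their atomics because each `Stats` now lives behind `CpuLocal<RefCell<...>>`: only one thread, pinned to one CPU, can reach a given instance at a time. A standalone sketch of the pattern (hypothetical types, mirroring the diff):

    use core::cell::RefCell;

    struct Stats { alloc_bytes: u64, alloc_count: u64 }

    // Only reachable while pinned to one CPU, so plain u64 is enough.
    fn record(stats: &RefCell<Stats>, size: u64) {
        let mut s = stats.borrow_mut(); // exclusive on this CPU
        s.alloc_bytes = s.alloc_bytes.checked_add(size).unwrap(); // overflow is a bug
        s.alloc_count += 1;
    }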
View File

@@ -1,7 +1,7 @@
use self::cell::PrivateCell;
use crate::lock::{Gutex, GutexGroup, GutexWriteGuard};
use core::cell::RefMut;
use core::sync::atomic::AtomicU16;
use core::sync::atomic::{AtomicU8, Ordering};
mod cell;
@@ -12,52 +12,64 @@ mod cell;
///
/// We substitute `TDP_NOSLEEPING` with `td_intr_nesting_level` and `td_critnest` since those are
/// the only cases where the thread should not be allowed to sleep.
///
/// Do not try to access any [`PrivateCell`] fields from an interrupt handler because the cell
/// might currently be locked, which can cause a panic.
pub struct Thread {
critical_sections: PrivateCell<u32>, // td_critnest
active_interrupts: usize, // td_intr_nesting_level
active_mutexes: AtomicU16, // td_locks
sleeping: Gutex<usize>, // td_wchan
active_pins: AtomicU8, // td_critnest
active_interrupts: AtomicU8, // td_intr_nesting_level
active_mutexes: PrivateCell<u16>, // td_locks
sleeping: Gutex<usize>, // td_wchan
}
impl Thread {
/// # Context safety
/// This function does not require a CPU context.
///
/// # Safety
/// This function does not do anything except initialize the struct memory. It is the caller
/// responsibility to configure the thread after this so it have a proper states and trigger
/// necessary events.
pub unsafe fn new_bare() -> Self {
///
/// # Context safety
/// This function does not require a CPU context.
pub fn new_bare() -> Self {
// td_critnest on the PS4 starts at 1, but this does not work in our case because we use
// RAII to increment and decrement it.
let gg = GutexGroup::new();
Self {
critical_sections: PrivateCell::new(0),
active_interrupts: 0,
active_mutexes: AtomicU16::new(0),
active_pins: AtomicU8::new(0),
active_interrupts: AtomicU8::new(0),
active_mutexes: PrivateCell::new(0),
sleeping: gg.spawn(0),
}
}
/// See [`crate::context::Context::pin()`] for a safe wrapper.
pub fn can_sleep(&self) -> bool {
let active_pins = self.active_pins.load(Ordering::Relaxed);
let active_interrupts = self.active_interrupts.load(Ordering::Relaxed);
active_pins == 0 && active_interrupts == 0
}
/// See [`crate::context::pin_cpu()`] for a safe wrapper.
///
/// # Safety
/// This is a counter. Each increment must paired with a decrement. Failure to do so will cause
/// the whole system to be in an undefined behavior.
/// Once this value reaches zero this thread can switch to a different CPU. The code after the
/// decrement therefore must not depend on a specific CPU.
///
/// This value must not be modified by another thread.
pub unsafe fn active_pins(&self) -> &AtomicU8 {
&self.active_pins
}
/// # Safety
/// This value can only be modified by the interrupt entry point.
pub unsafe fn active_interrupts(&self) -> &AtomicU8 {
&self.active_interrupts
}
/// # Panics
/// If called from the other thread.
pub unsafe fn critical_sections_mut(&self) -> RefMut<u32> {
self.critical_sections.borrow_mut(self)
}
pub fn active_interrupts(&self) -> usize {
self.active_interrupts
}
pub fn active_mutexes(&self) -> &AtomicU16 {
&self.active_mutexes
pub fn active_mutexes_mut(&self) -> RefMut<u16> {
unsafe { self.active_mutexes.borrow_mut(self) }
}
/// Sleeping address. Zero if this thread is not in a sleep queue.

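A sketch of the counter contract described above (hypothetical sequence; the API and orderings are from the diff): every `Acquire` increment must pair with a `Release` decrement, and while either counter is non-zero, `can_sleep()` reports false.

    // Hypothetical demonstration of the pin/sleep invariant.
    fn pin_contract_demo() {
        let td = current_thread();
        unsafe { td.active_pins().fetch_add(1, Ordering::Acquire) };
        assert!(!td.can_sleep()); // pinned: sleeping APIs would panic
        unsafe { td.active_pins().fetch_sub(1, Ordering::Release) };
    }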
View File

@@ -1,5 +1,5 @@
use super::Thread;
use crate::context::{BorrowedArc, Context};
use crate::context::{current_thread, BorrowedArc};
use core::cell::{RefCell, RefMut};
/// Encapsulates a field of [Thread] that can only be accessed by the CPU that is currently executing
@@ -22,7 +22,7 @@ impl<T> PrivateCell<T> {
}
fn validate(&self, owner: &Thread) {
let current = Context::thread();
let current = current_thread();
if !core::ptr::eq(BorrowedArc::as_ptr(&current), owner) {
panic!("accessing a private cell from the other thread is not supported");

View File

@@ -1,9 +1,9 @@
use crate::context::Context;
use crate::context::current_thread;
/// See `_sleep` on the PS4 for a reference.
pub fn sleep() {
// Remove current thread from sleep queue.
let td = Context::thread();
let td = current_thread();
let addr = td.sleeping_mut();
if *addr != 0 {

View File

@@ -1,4 +1,6 @@
use crate::config::boot_env;
use crate::context::current_thread;
use core::sync::atomic::Ordering;
use obconf::BootEnv;
/// Main entry point for interrupt.
@@ -7,11 +9,17 @@ use obconf::BootEnv;
///
/// See `trap` function on the PS4 for a reference.
pub extern "C" fn interrupt_handler(frame: &mut TrapFrame) {
let td = current_thread();
unsafe { td.active_interrupts().fetch_add(1, Ordering::Relaxed) };
match frame.num {
TrapNo::Breakpoint => match boot_env() {
BootEnv::Vm(vm) => super::vm::interrupt_handler(vm, frame),
},
}
unsafe { td.active_interrupts().fetch_sub(1, Ordering::Relaxed) };
}
/// Predefined interrupt vector number.

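While the handler runs, `td_intr_nesting_level` is non-zero, so `can_sleep()` is false and the sleeping paths guarded by it (mutex locking, heap allocation) panic instead of blocking inside an interrupt. A hypothetical illustration:

    // Inside interrupt_handler the current thread must not sleep.
    fn assert_in_interrupt() {
        let td = current_thread();
        debug_assert!(!td.can_sleep());
    }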
View File

@@ -1,7 +1,7 @@
use self::cache::UmaCache;
use crate::context::{Context, CpuLocal};
use crate::lock::Mutex;
use crate::context::{current_thread, CpuLocal};
use alloc::borrow::Cow;
use core::cell::RefCell;
use core::num::NonZero;
mod bucket;
@@ -9,8 +9,8 @@ mod cache;
/// Implementation of `uma_zone` structure.
pub struct UmaZone {
size: NonZero<usize>, // uz_size
caches: CpuLocal<Mutex<UmaCache>>, // uz_cpu
size: NonZero<usize>, // uz_size
caches: CpuLocal<RefCell<UmaCache>>, // uz_cpu
}
impl UmaZone {
@@ -20,7 +20,7 @@ impl UmaZone {
// basically an implementation of zone_ctor.
Self {
size,
caches: CpuLocal::new(|_| Mutex::new(UmaCache::default())),
caches: CpuLocal::new(|_| RefCell::default()),
}
}
@@ -31,15 +31,15 @@ impl UmaZone {
/// See `uma_zalloc_arg` on the PS4 for a reference.
pub fn alloc(&self) -> *mut u8 {
// Our implementation implies M_WAITOK and M_ZERO.
let td = Context::thread();
let td = current_thread();
if td.active_interrupts() != 0 {
panic!("heap allocation in an interrupt handler is not supported");
if !td.can_sleep() {
panic!("heap allocation in a non-sleeping context is not supported");
}
// Try to allocate from per-CPU cache.
let pin = self.caches.lock();
let mut cache = pin.lock();
let mut cache = pin.borrow_mut();
let bucket = cache.alloc_mut();
while let Some(bucket) = bucket {

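Swapping the per-CPU `Mutex` for a `RefCell` follows from the pinning rules: `CpuLocal::lock()` pins the thread, a pinned thread may not sleep, and a `Mutex` may sleep, so a `RefCell` (which only asserts non-reentrancy) is the right fit. A hypothetical fragment mirroring the fast path (signature made up; `UmaCache` is the crate's private type):

    fn cache_fast_path(caches: &CpuLocal<RefCell<UmaCache>>) {
        let pin = caches.lock();          // per-CPU pin; never sleeps
        let mut cache = pin.borrow_mut(); // exclusive on this CPU
        let _bucket = cache.alloc_mut();  // try the per-CPU bucket first
    }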
View File

@@ -1,3 +1,4 @@
use crate::context::current_user_rsp_offset;
use crate::trap::interrupt_handler;
use bitfield_struct::bitfield;
use core::arch::{asm, global_asm};
@@ -192,7 +193,13 @@
);
// See Xfast_syscall on the PS4 for a reference.
global_asm!("syscall_entry64:", "swapgs", "ud2");
global_asm!(
"syscall_entry64:",
"swapgs",
"mov gs:[{user_rsp}], rsp",
"ud2",
user_rsp = const current_user_rsp_offset()
);
// See Xfast_syscall32 on the PS4 for a reference.
global_asm!("syscall_entry32:", "ud2");