Ports Gutex to kernel (#986)

This commit is contained in:
Putta Khunchalee 2024-09-16 02:12:42 +07:00 committed by GitHub
parent 022cfd041c
commit 68c288e3c9
9 changed files with 273 additions and 8 deletions

.gitignore vendored
View File

@ -2,6 +2,6 @@
/.cache/
/.flatpak-builder/
/.kernel-debug
/Cargo.lock
/build/
/target/
Cargo.lock

View File

@ -0,0 +1,51 @@
use super::GroupGuard;
use core::fmt::{Display, Formatter};
use core::ops::{Deref, DerefMut};
/// RAII structure used to release the exclusive write access of a lock when dropped.
pub struct GutexWriteGuard<'a, T> {
#[allow(dead_code)] // `active` and `value` are protected by this lock.
lock: GroupGuard<'a>,
active: *mut usize,
value: *mut T,
}
impl<'a, T> GutexWriteGuard<'a, T> {
/// # Safety
/// `active` and `value` must be protected by `lock`.
pub(super) unsafe fn new(lock: GroupGuard<'a>, active: *mut usize, value: *mut T) -> Self {
Self {
active,
value,
lock,
}
}
}
impl<'a, T> Drop for GutexWriteGuard<'a, T> {
fn drop(&mut self) {
unsafe { *self.active = 0 };
}
}
impl<'a, T> Deref for GutexWriteGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*self.value }
}
}
impl<'a, T> DerefMut for GutexWriteGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.value }
}
}
impl<'a, T: Display> Display for GutexWriteGuard<'a, T> {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
self.deref().fmt(f)
}
}
unsafe impl<'a, T: Sync> Sync for GutexWriteGuard<'a, T> {}

View File

@ -0,0 +1,178 @@
use super::MTX_UNOWNED;
use crate::context::Context;
use alloc::rc::Rc;
use alloc::sync::Arc;
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::sync::atomic::{AtomicUsize, Ordering};
pub use self::guard::*;
mod guard;
/// A mutex that grants exclusive access to a group of members.
///
/// The [`crate::lock::Mutex`] is prone to deadlock when used on multiple struct fields like
/// this:
///
/// ```
/// use crate::lock::Mutex;
///
/// pub struct Foo {
/// field1: Mutex<()>,
/// field2: Mutex<()>,
/// }
/// ```
///
/// The locks must be acquired in the same order everywhere, otherwise a deadlock is possible.
/// Maintaining the lock order manually is a cumbersome task, so we introduce this type to handle
/// it instead.
///
/// How this type works is simple: any lock on any member locks the same mutex in the group,
/// which means there is only one mutex per group. It has the same effect as the following
/// code:
///
/// ```
/// use crate::lock::Mutex;
///
/// pub struct Foo {
/// data: Mutex<Data>,
/// }
///
/// struct Data {
/// field1: (),
/// field2: (),
/// }
/// ```
///
/// The bonus of this type is that it allows recursive locking for read-only access, so you will
/// never deadlock yourself. It will panic if you try to acquire write access while readers are
/// still active, the same as [`core::cell::RefCell`].
pub struct Gutex<T> {
group: Arc<GutexGroup>,
active: UnsafeCell<usize>,
value: UnsafeCell<T>,
}
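
A rough sketch (not part of this commit) of how the `Foo` example from the doc comment above could be restructured with the group-based API defined in this file:

```
use crate::lock::{Gutex, GutexGroup};

pub struct Foo {
    field1: Gutex<()>,
    field2: Gutex<()>,
}

impl Foo {
    pub fn new() -> Self {
        // Both members share one GutexGroup, so locking either of them
        // acquires the same underlying lock and no lock ordering needs to
        // be maintained.
        let gg = GutexGroup::new();

        Self {
            field1: gg.spawn(()),
            field2: gg.spawn(()),
        }
    }
}
```
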
impl<T> Gutex<T> {
/// # Panics
/// If there is an active reader or writer.
pub fn write(&self) -> GutexWriteGuard<T> {
// Check if there is an active reader or writer.
let lock = self.group.lock();
let active = self.active.get();
// SAFETY: This is safe because we own the lock that protects both `active` and `value`.
unsafe {
if *active != 0 {
panic!(
"attempt to acquire the write lock while there are an active reader or writer"
);
}
*active = usize::MAX;
GutexWriteGuard::new(lock, active, self.value.get())
}
}
}
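
A minimal illustration of the panic behavior described above, assuming a single-member group:

```
let gg = GutexGroup::new();
let value = gg.spawn(0usize);

let first = value.write();

// A second write() on the same Gutex panics while `first` is still alive,
// the same way RefCell::borrow_mut() would.
// let second = value.write();
```
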
unsafe impl<T: Send> Send for Gutex<T> {}
unsafe impl<T: Send> Sync for Gutex<T> {}
/// Group of [`Gutex`].
pub struct GutexGroup {
owning: AtomicUsize,
active: UnsafeCell<usize>,
}
impl GutexGroup {
pub fn new() -> Arc<Self> {
// This function is not allowed to access the CPU context because it can be called before the
// context has been activated.
Arc::new(Self {
owning: AtomicUsize::new(MTX_UNOWNED),
active: UnsafeCell::new(0),
})
}
pub fn spawn<T>(self: &Arc<Self>, value: T) -> Gutex<T> {
// This function is not allowed to access the CPU context because it can be called before the
// context has been activated.
Gutex {
group: self.clone(),
active: UnsafeCell::new(0),
value: UnsafeCell::new(value),
}
}
#[inline(never)]
fn lock(&self) -> GroupGuard {
// Check if the calling thread already owns the lock.
let td = Context::thread();
let id = Arc::as_ptr(&td) as usize;
if id == self.owning.load(Ordering::Relaxed) {
// SAFETY: This is safe because the current thread owns the lock.
return unsafe { GroupGuard::new(self) };
}
// Acquire the lock.
while self
.owning
.compare_exchange(MTX_UNOWNED, id, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
todo!()
}
// SAFETY: This is safe because the current thread acquired the lock successfully via the
// compare_exchange() above.
unsafe { GroupGuard::new(self) }
}
}
unsafe impl Send for GutexGroup {}
unsafe impl Sync for GutexGroup {}
/// An RAII object used to release the lock on [`GutexGroup`]. This type cannot be `Send` because
/// it would cause a data race on the group when dropped if more than one [`GroupGuard`] is active.
struct GroupGuard<'a> {
group: &'a GutexGroup,
phantom: PhantomData<Rc<()>>, // For !Send and !Sync.
}
impl<'a> GroupGuard<'a> {
/// # Safety
/// The group must be locked by the calling thread with no active references to any of its
/// fields.
unsafe fn new(group: &'a GutexGroup) -> Self {
*group.active.get() += 1;
Self {
group,
phantom: PhantomData,
}
}
}
impl<'a> Drop for GroupGuard<'a> {
#[inline(never)]
fn drop(&mut self) {
// Decrease the active lock count.
unsafe {
let active = self.group.active.get();
*active -= 1;
if *active != 0 {
return;
}
}
// Release the lock.
self.group.owning.store(MTX_UNOWNED, Ordering::Release);
todo!("wakeup waiting thread");
}
}
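
To make the group semantics concrete, here is a small sketch (not from this commit) of one thread holding write guards on two different members of the same group. Note that with the work-in-progress Drop above, releasing the last guard still ends in the todo!(), so this only illustrates the intended behavior:

```
let gg = GutexGroup::new();
let a = gg.spawn(0u32);
let b = gg.spawn(0u32);

// The second write() takes the recursive path in GutexGroup::lock() because
// the calling thread already owns the group, so GroupGuard's active count
// becomes 2 and the group is only released when the last guard drops.
let mut ga = a.write();
let mut gb = b.write();

*ga += 1;
*gb += 1;
```
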

View File

@ -1,3 +1,7 @@
pub use self::gutex::*;
pub use self::mutex::*;
mod gutex;
mod mutex;
const MTX_UNOWNED: usize = 4;

View File

@ -1,3 +1,4 @@
use super::MTX_UNOWNED;
use crate::context::Context;
use alloc::rc::Rc;
use alloc::sync::Arc;
@ -14,15 +15,13 @@ pub struct Mutex<T> {
}
impl<T> Mutex<T> {
const OWNER_NONE: usize = 4; // MTX_UNOWNED
/// See `mtx_init` on the PS4 for a reference.
pub fn new(data: T) -> Self {
// This function is not allowed to access the CPU context because it can be called before the
// context has been activated.
Self {
data: UnsafeCell::new(data),
owning: AtomicUsize::new(Self::OWNER_NONE),
owning: AtomicUsize::new(MTX_UNOWNED),
phantom: PhantomData,
}
}
@ -40,7 +39,7 @@ impl<T> Mutex<T> {
if self
.owning
.compare_exchange(
Self::OWNER_NONE,
MTX_UNOWNED,
Arc::as_ptr(&td) as usize,
Ordering::Acquire,
Ordering::Relaxed,
@ -72,7 +71,7 @@ impl<T> Mutex<T> {
if lock
.compare_exchange(
Arc::as_ptr(&td) as usize,
Self::OWNER_NONE,
MTX_UNOWNED,
Ordering::Release,
Ordering::Relaxed,
)

View File

@ -4,6 +4,7 @@
use crate::context::Context;
use crate::malloc::KernelHeap;
use crate::proc::{ProcMgr, Thread};
use crate::sched::sleep;
use alloc::sync::Arc;
use core::mem::zeroed;
use core::panic::PanicInfo;
@ -17,6 +18,7 @@ mod lock;
mod malloc;
mod panic;
mod proc;
mod sched;
mod uma;
extern crate alloc;
@ -63,14 +65,18 @@ fn main(pmgr: Arc<ProcMgr>) -> ! {
unsafe { KERNEL_HEAP.activate_stage2() };
// See scheduler() function on the PS4 for a reference.
// See scheduler() function on the PS4 for a reference. Actually it should be called swapper
// instead.
// TODO: Subscribe to "system_suspend_phase2_pre_sync" and "system_resume_phase2" event.
loop {
// TODO: Implement a call to vm_page_count_min().
let procs = pmgr.procs();
if procs.len() == 0 {
todo!();
// TODO: The PS4 checks some value for non-zero, but it seems like that value is always
// zero.
sleep();
continue;
}
todo!();

View File

@ -1,3 +1,4 @@
use crate::lock::{Gutex, GutexGroup, GutexWriteGuard};
use core::sync::atomic::{AtomicU16, AtomicU32};
/// Implementation of `thread` structure.
@ -11,6 +12,7 @@ pub struct Thread {
critical_sections: AtomicU32, // td_critnest
active_interrupts: usize, // td_intr_nesting_level
active_mutexes: AtomicU16, // td_locks
sleeping: Gutex<usize>, // td_wchan
}
impl Thread {
@ -24,10 +26,13 @@ impl Thread {
//
// td_critnest on the PS4 started with 1 but this does not work in our case because we use
// RAII to increase and decrease it.
let gg = GutexGroup::new();
Self {
critical_sections: AtomicU32::new(0),
active_interrupts: 0,
active_mutexes: AtomicU16::new(0),
sleeping: gg.spawn(0),
}
}
@ -47,4 +52,9 @@ impl Thread {
pub fn active_mutexes(&self) -> &AtomicU16 {
&self.active_mutexes
}
/// Sleeping address. Zero if this thread is not in a sleep queue.
pub fn sleeping_mut(&self) -> GutexWriteGuard<usize> {
self.sleeping.write()
}
}
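
Since td_wchan is only exposed through this getter, here is a hypothetical sketch (not part of this commit) of how a wakeup path might clear it, assuming `td` is the thread being woken:

```
let mut sleeping = td.sleeping_mut();

// A non-zero value means the thread is on a sleep queue; clearing it marks
// the thread as no longer sleeping, per the doc comment above.
if *sleeping != 0 {
    *sleeping = 0;
}
```
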

View File

@ -0,0 +1,3 @@
pub use self::sleep::*;
mod sleep;

View File

@ -0,0 +1,14 @@
use crate::context::Context;
/// See `_sleep` on the PS4 for a reference.
pub fn sleep() {
// Remove current thread from sleep queue.
let td = Context::thread();
let addr = td.sleeping_mut();
if *addr != 0 {
todo!()
}
todo!()
}