Initializes mutex implementation (#984)
Some checks failed
Development Build / Build (push) Failing after 1s
Development Build / Update PRs (push) Failing after 1s

This commit is contained in:
Putta Khunchalee 2024-09-15 15:15:23 +07:00 committed by GitHub
parent c1dcf43d4b
commit b027031086
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 186 additions and 15 deletions

View File

@ -11,6 +11,8 @@ mod arch;
/// # Interrupt safety
/// This function is interrupt safe.
pub fn boot_env() -> &'static BootEnv {
    // This function must not access the CPU context because it can be called before the context
    // has been activated.
    // SAFETY: setup() requires that it is called once, immediately in the kernel entry point, so
    // BOOT_ENV is initialized before any call to this function and never written again.
    unsafe { &*BOOT_ENV }
}
@ -18,6 +20,8 @@ pub fn boot_env() -> &'static BootEnv {
/// # Interrupt safety
/// This function is interrupt safe.
pub fn config() -> &'static Config {
    // This function must not access the CPU context because it can be called before the context
    // has been activated.
    // SAFETY: setup() requires that it is called once, immediately in the kernel entry point, so
    // CONFIG is initialized before any call to this function and never written again.
    unsafe { &*CONFIG }
}
@ -26,6 +30,7 @@ pub fn config() -> &'static Config {
/// This function must be called immediately in the kernel entry point. After that it must never
/// be called again.
pub unsafe fn setup(env: &'static BootEnv, conf: &'static Config) {
    // The requirements of this function imply that it must not access the CPU context: it runs
    // before the context has been activated.
    BOOT_ENV = env;
    CONFIG = conf;
}

View File

@ -19,6 +19,8 @@ mod vm;
/// Prints an informational message via [`crate::console::info`], capturing the call site's file
/// and line.
#[macro_export]
macro_rules! info {
    ($($args:tt)*) => {
        // This macro must not access the CPU context because it can be invoked before the
        // context has been activated.
        $crate::console::info(file!(), line!(), format_args!($($args)*))
    };
}
@ -28,6 +30,8 @@ macro_rules! info {
/// (e.g. no heap allocation).
#[inline(never)]
pub fn info(file: &str, line: u32, msg: impl Display) {
// This function must not access the CPU context because it can be called before the context has
// been activated.
print(
MsgType::Info,
Log {
@ -45,6 +49,8 @@ pub fn info(file: &str, line: u32, msg: impl Display) {
/// (e.g. no heap allocation).
#[inline(never)]
pub fn error(file: &str, line: u32, msg: impl Display) {
// This function must not access the CPU context because it can be called before the context has
// been activated.
print(
MsgType::Error,
Log {
@ -61,6 +67,8 @@ pub fn error(file: &str, line: u32, msg: impl Display) {
/// This function is interrupt safe as long as the [`Display`] implementation on `msg` is
/// interrupt safe (e.g. no heap allocation).
fn print(vty: MsgType, msg: impl Display) {
// This function must not access the CPU context because it can be called before the context has
// been activated.
match boot_env() {
BootEnv::Vm(env) => self::vm::print(env, vty, msg),
}
@ -77,7 +85,8 @@ struct Log<'a, M: Display> {
impl<'a, M: Display> Display for Log<'a, M> {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
// This implementation must be interrupt safe.
// This implementation must be interrupt safe and must not access the CPU context because it can
// be called before the context has been activated.
writeln!(
f,
"{}++++++++++++++++++ {} {}:{}{0:#}",

View File

@ -7,6 +7,8 @@ use obvirt::console::{Memory, MsgType};
/// This function is interrupt safe as long as the [`Display`] implementation on `msg` is
/// interrupt safe (e.g. no heap allocation).
pub fn print(env: &Vm, ty: MsgType, msg: impl Display) {
// This function must not access the CPU context because it can be called before the context has
// been activated.
let c = env.console as *mut Memory;
let mut w = Writer(c);
@ -19,7 +21,8 @@ struct Writer(*mut Memory);
impl Write for Writer {
fn write_str(&mut self, s: &str) -> core::fmt::Result {
// This implementation must be interrupt safe.
// This implementation must be interrupt safe and must not access the CPU context because it can
// be called before the context has been activated.
unsafe { write_volatile(addr_of_mut!((*self.0).msg_len), s.len()) };
unsafe { write_volatile(addr_of_mut!((*self.0).msg_addr), s.as_ptr() as usize) };
Ok(())

View File

@ -4,6 +4,10 @@ use alloc::vec::Vec;
use core::ops::Deref;
/// Encapsulates per-CPU value.
///
/// In theory you could use `RefCell` to get mutable access to the value, but that is prone to
/// panics: the CPU is allowed to switch to another thread, which will panic if the new thread
/// attempts to borrow the same `RefCell` while it is already borrowed.
pub struct CpuLocal<T>(Vec<T>);
impl<T> CpuLocal<T> {

View File

@ -15,6 +15,12 @@ mod local;
/// not safe to hold a temporary pointer or reference to this struct or its fields because the CPU
/// might get interrupted, which means the next instruction may execute on a different CPU if the
/// interrupt causes the CPU to switch tasks.
///
/// The activation of this struct is a minimum requirement for a new CPU to call most of the other
/// functions. The new CPU should call [`Context::activate`] as soon as possible. We don't mark the
/// functions that require this context as `unsafe`, nor make them check for the context, because
/// that would cover (almost) all of them. So we impose this requirement on the functions that set
/// up a CPU instead.
pub struct Context {
cpu: usize, // pc_cpuid
thread: AtomicPtr<Thread>, // pc_curthread
@ -23,6 +29,8 @@ pub struct Context {
impl Context {
/// See `pcpu_init` on the PS4 for a reference.
pub fn new(cpu: usize, td: Arc<Thread>) -> Self {
// This function must not access the activated context because it can be called without any
// other context having been activated.
Self {
cpu,
thread: AtomicPtr::new(Arc::into_raw(td).cast_mut()),
@ -50,7 +58,6 @@ impl Context {
/// dropped (but it is allowed to sleep).
///
/// See `critical_enter` and `critical_exit` on the PS4 for a reference.
#[inline(never)]
pub fn pin() -> PinnedContext {
// Relax ordering should be enough here since this increment will be checked by the same CPU
// when an interupt happens.
@ -73,6 +80,8 @@ impl Context {
impl Drop for Context {
    fn drop(&mut self) {
        // This function must not access the activated context because it can be called before
        // context activation.
        // SAFETY: The pointer was produced by Arc::into_raw in Context::new and is reclaimed
        // exactly once here, balancing the reference count taken there.
        unsafe { drop(Arc::from_raw(self.thread.load(Ordering::Relaxed))) };
    }
}

View File

@ -43,7 +43,7 @@ pub unsafe fn thread() -> *const Thread {
asm!(
"mov {out}, gs:[{off}]",
off = in(reg) offset_of!(Context, thread),
off = in(reg) offset_of!(Context, thread), // TODO: Use const from Rust 1.82.
out = out(reg) td,
options(pure, readonly, preserves_flags, nostack)
);
@ -52,13 +52,13 @@ pub unsafe fn thread() -> *const Thread {
}
pub unsafe fn cpu() -> usize {
// SAFETY: This load load need to synchronize with a critical section. That mean we cannot use
// SAFETY: This load need to synchronize with a critical section. That mean we cannot use
// "pure" + "readonly" options here.
let mut cpu;
asm!(
"mov {out}, gs:[{off}]",
off = in(reg) offset_of!(Context, cpu),
off = in(reg) offset_of!(Context, cpu), // TODO: Use const from Rust 1.82.
out = out(reg) cpu,
options(preserves_flags, nostack)
);

View File

@ -0,0 +1,3 @@
// Re-export the mutex implementation so callers can use e.g. `crate::lock::Mutex` directly.
pub use self::mutex::*;

mod mutex;

View File

@ -0,0 +1,109 @@
use crate::context::Context;
use crate::proc::Thread;
use alloc::rc::Rc;
use alloc::sync::Arc;
use core::cell::UnsafeCell;
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicUsize, Ordering};
/// Implementation of `mtx` structure.
pub struct Mutex<T> {
    data: UnsafeCell<T>,          // Protected data; UnsafeCell allows mutation through &self.
    owning: AtomicUsize,          // mtx_lock (address of the owning thread, or OWNER_NONE)
    phantom: PhantomData<Rc<()>>, // For !Send and !Sync.
}
impl<T> Mutex<T> {
    const OWNER_NONE: usize = 4; // MTX_UNOWNED

    /// Creates an unlocked mutex protecting `data`.
    ///
    /// See `mtx_init` on the PS4 for a reference.
    pub fn new(data: T) -> Self {
        Self {
            data: UnsafeCell::new(data),
            owning: AtomicUsize::new(Self::OWNER_NONE),
            phantom: PhantomData,
        }
    }

    /// Acquires the mutex. The contended path is currently unimplemented (`todo!`).
    ///
    /// See `_mtx_lock_flags` on the PS4 for a reference.
    ///
    /// # Panics
    /// If called while the current thread is handling an interrupt.
    pub fn lock(&self) -> MutexGuard<T> {
        // Disallow locking in an interrupt handler.
        let td = Context::thread();

        if td.active_interrupts() != 0 {
            panic!("locking a mutex in an interupt handler is not supported");
        }

        // Take ownership by swapping OWNER_NONE for the address of the current thread. Acquire
        // ordering pairs with the Release store in MutexGuard::drop so we observe all writes made
        // under the previous ownership.
        if self
            .owning
            .compare_exchange(
                Self::OWNER_NONE,
                Arc::as_ptr(&td) as usize,
                Ordering::Acquire,
                Ordering::Relaxed,
            )
            .is_err()
        {
            // The mutex is already owned; contention handling is not implemented yet.
            todo!()
        }

        td.active_mutexes().fetch_add(1, Ordering::Relaxed);

        MutexGuard {
            mtx: self,
            td,
            phantom: PhantomData,
        }
    }
}
// SAFETY: Moving a Mutex<T> to another thread moves the T with it, so T: Send suffices for Send.
// For Sync, a &Mutex<T> only exposes the data through lock(), which serializes access via the
// `owning` atomic, so T: Send (not T: Sync) is the right bound, as with std::sync::Mutex.
unsafe impl<T: Send> Send for Mutex<T> {}
unsafe impl<T: Send> Sync for Mutex<T> {}
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is dropped (falls out
/// of scope), the lock will be unlocked.
pub struct MutexGuard<'a, T> {
    mtx: &'a Mutex<T>,            // The mutex this guard has locked.
    td: Arc<Thread>,              // The thread that acquired the lock.
    phantom: PhantomData<Rc<()>>, // For !Send and !Sync.
}
impl<'a, T> Drop for MutexGuard<'a, T> {
    fn drop(&mut self) {
        // This method is basically an implementation of _mtx_unlock_flags.
        self.td.active_mutexes().fetch_sub(1, Ordering::Relaxed);

        // TODO: There is a check for (m->lock_object).lo_data == 0 on the PS4.

        // Release ownership. Release ordering pairs with the Acquire in Mutex::lock so the next
        // owner observes our writes to the protected data.
        if self
            .mtx
            .owning
            .compare_exchange(
                Arc::as_ptr(&self.td) as usize,
                Mutex::<T>::OWNER_NONE,
                Ordering::Release,
                Ordering::Relaxed,
            )
            .is_err()
        {
            // The recorded owner was not this thread; this state is currently unhandled.
            todo!()
        }
    }
}
impl<'a, T> Deref for MutexGuard<'a, T> {
    type Target = T;

    /// Borrows the protected data for as long as the guard lives.
    fn deref(&self) -> &Self::Target {
        // SAFETY: Holding the guard means this thread owns the lock, so no other reference to the
        // data can be created concurrently.
        let data = self.mtx.data.get();
        unsafe { &*data }
    }
}
impl<'a, T> DerefMut for MutexGuard<'a, T> {
    /// Mutably borrows the protected data for as long as the guard lives.
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: Holding the guard exclusively (&mut self) means this thread owns the lock and
        // no other reference to the data exists.
        let data = self.mtx.data.get();
        unsafe { &mut *data }
    }
}
unsafe impl<'a, T: Sync> Sync for MutexGuard<'a, T> {}

View File

@ -14,6 +14,7 @@ mod config;
mod console;
mod context;
mod imgfmt;
mod lock;
mod malloc;
mod panic;
mod proc;
@ -79,7 +80,8 @@ fn main() -> ! {
#[allow(dead_code)]
#[cfg_attr(target_os = "none", panic_handler)]
fn panic(i: &PanicInfo) -> ! {
// Get location.
// This function must not access the CPU context because it can be called before the context has
// been activated.
let (file, line) = match i.location() {
Some(v) => (v.file(), v.line()),
None => ("unknown", 0),

View File

@ -47,6 +47,8 @@ impl KernelHeap {
impl Drop for KernelHeap {
fn drop(&mut self) {
// If stage 2 has not been activated yet then this function must not access the CPU context
// because it can be called before the context has been activated.
let stage2 = self.stage2.load(Ordering::Acquire);
if !stage2.is_null() {
@ -58,6 +60,8 @@ impl Drop for KernelHeap {
unsafe impl GlobalAlloc for KernelHeap {
#[inline(never)]
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// If stage 2 has not been activated yet then this function must not access the CPU context
// because it can be called before the context has been activated.
// SAFETY: GlobalAlloc::alloc required layout to be non-zero.
self.stage2
.load(Ordering::Acquire)
@ -68,6 +72,8 @@ unsafe impl GlobalAlloc for KernelHeap {
#[inline(never)]
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// If stage 2 has not been activated yet then this function must not access the CPU context
// because it can be called before the context has been activated.
if self.stage1.is_owner(ptr) {
// SAFETY: GlobalAlloc::dealloc required ptr to be the same one that returned from our
// GlobalAlloc::alloc and layout to be the same one that passed to it.

View File

@ -4,6 +4,9 @@ use talc::{ClaimOnOom, Span, Talc};
/// Stage 1 kernel heap.
///
/// This stage must not access the CPU context because it can be used before the context has been
/// activated.
///
/// This stage allocate a memory from a static buffer (AKA arena).
pub struct Stage1 {
engine: spin::Mutex<Talc<ClaimOnOom>>,
@ -49,3 +52,5 @@ impl Stage1 {
self.engine.lock().free(NonNull::new_unchecked(ptr), layout);
}
}
unsafe impl Sync for Stage1 {}

View File

@ -5,6 +5,8 @@ use core::arch::asm;
/// # Interrupt safety
/// This function is interrupt safe.
pub fn panic() -> ! {
// This function must not access the CPU context because it can be called before the context has
// been activated.
loop {
#[cfg(target_arch = "aarch64")]
unsafe {

View File

@ -1,4 +1,4 @@
use core::sync::atomic::AtomicU32;
use core::sync::atomic::{AtomicU16, AtomicU32};
/// Implementation of `thread` structure.
///
@ -10,6 +10,7 @@ use core::sync::atomic::AtomicU32;
/// Implementation of the `thread` structure.
pub struct Thread {
    critical_sections: AtomicU32, // td_critnest
    active_interrupts: usize,     // td_intr_nesting_level
    active_mutexes: AtomicU16,    // td_locks (number of mutexes currently held)
}
impl Thread {
@ -18,11 +19,15 @@ impl Thread {
/// responsibility to configure the thread after this so it have a proper states and trigger
/// necessary events.
pub unsafe fn new_bare() -> Self {
    // This function must not access the CPU context because it can be called before the context
    // has been activated.
    //
    // td_critnest on the PS4 starts at 1, but that does not work in our case because we use RAII
    // to increase and decrease it.
    Self {
        critical_sections: AtomicU32::new(0),
        active_interrupts: 0,
        active_mutexes: AtomicU16::new(0),
    }
}
@ -38,4 +43,8 @@ impl Thread {
/// Returns `td_intr_nesting_level`: non-zero while this thread is handling an interrupt.
pub fn active_interrupts(&self) -> usize {
    self.active_interrupts
}

/// Returns the counter of mutexes currently held by this thread (`td_locks`).
pub fn active_mutexes(&self) -> &AtomicU16 {
    &self.active_mutexes
}
}

View File

@ -7,7 +7,7 @@ pub struct UmaCache {
}
impl UmaCache {
pub fn alloc(&self) -> Option<&UmaBucket> {
self.alloc.as_ref()
pub fn alloc_mut(&mut self) -> Option<&mut UmaBucket> {
self.alloc.as_mut()
}
}

View File

@ -1,5 +1,6 @@
use self::cache::UmaCache;
use crate::context::{Context, CpuLocal};
use crate::lock::Mutex;
use alloc::borrow::Cow;
use core::num::NonZero;
@ -8,8 +9,8 @@ mod cache;
/// Implementation of `uma_zone` structure.
pub struct UmaZone {
size: NonZero<usize>, // uz_size
caches: CpuLocal<UmaCache>, // uz_cpu
size: NonZero<usize>, // uz_size
caches: CpuLocal<Mutex<UmaCache>>, // uz_cpu
}
impl UmaZone {
@ -19,7 +20,7 @@ impl UmaZone {
// basically an implementation of zone_ctor.
Self {
size,
caches: CpuLocal::new(|_| UmaCache::default()),
caches: CpuLocal::new(|_| Mutex::new(UmaCache::default())),
}
}
@ -37,8 +38,9 @@ impl UmaZone {
}
// Try to allocate from per-CPU cache.
let cache = self.caches.lock();
let bucket = cache.alloc();
let pin = self.caches.lock();
let mut cache = pin.lock();
let bucket = cache.alloc_mut();
while let Some(bucket) = bucket {
if bucket.len() != 0 {
@ -48,6 +50,9 @@ impl UmaZone {
todo!()
}
drop(cache);
drop(pin);
todo!()
}
}