Initializes ProcMgr (#985)

Putta Khunchalee 2024-09-15 19:15:33 +07:00 committed by GitHub
parent b027031086
commit 022cfd041c
8 changed files with 193 additions and 48 deletions

View File

@@ -10,7 +10,6 @@ use crate::vmm::VmmError;
 use libc::{mmap, open, MAP_FAILED, MAP_PRIVATE, O_RDWR, PROT_READ, PROT_WRITE};
 use std::os::fd::{AsRawFd, FromRawFd, OwnedFd};
 use std::ptr::null_mut;
-use std::sync::Arc;
 use thiserror::Error;
 #[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]

View File

@@ -5,6 +5,7 @@ edition = "2021"
 [dependencies]
 anstyle = { version = "1.0.8", default-features = false }
+hashbrown = "0.14.5"
 macros = { path = "../macros" }
 obconf = { path = "../obconf" }
 obvirt = { path = "../obvirt" }

View File

@@ -1,4 +1,4 @@
-use crate::proc::Thread;
+use crate::proc::{ProcMgr, Thread};
 use alloc::sync::Arc;
 use core::sync::atomic::{AtomicPtr, Ordering};
@@ -21,19 +21,35 @@ mod local;
 /// functions that require this context as `unsafe` nor make it check for the context because it
 /// will be (almost) all of it. So we impose this requirement on the function that sets up a CPU
 /// instead.
+///
+/// Beware of any type that implements [`Drop`] because it may access the CPU context. For maximum
+/// safety the CPU setup function **must not cause any value of a kernel type to drop before the
+/// context is activated**. It is safe to drop values of Rust core types (e.g. `String`) **only on
+/// the main CPU** because the only kernel functions they can call into are the stage 1 allocator
+/// and the panic handler, neither of which requires a CPU context.
 #[allow(dead_code)] // All fields accessed by inline assembly.
 pub struct Context {
     cpu: usize,                // pc_cpuid
     thread: AtomicPtr<Thread>, // pc_curthread
+    pmgr: *const ProcMgr,
 }

 impl Context {
+    /// Once this function returns, you should call [`Self::activate()`] as soon as possible. The
+    /// returned value cannot be dropped; otherwise it will panic.
+    ///
     /// See `pcpu_init` on the PS4 for a reference.
-    pub fn new(cpu: usize, td: Arc<Thread>) -> Self {
+    ///
+    /// # Safety
+    /// - `cpu` must be unique and valid.
+    /// - `pmgr` must be the same for all contexts.
+    pub unsafe fn new(cpu: usize, td: Arc<Thread>, pmgr: Arc<ProcMgr>) -> Self {
+        // This function is not allowed to access the activated context because it can be called
+        // without the other context being activated.
         Self {
             cpu,
             thread: AtomicPtr::new(Arc::into_raw(td).cast_mut()),
+            pmgr: Arc::into_raw(pmgr),
         }
     }
@@ -80,9 +96,7 @@ impl Context {
 impl Drop for Context {
     fn drop(&mut self) {
-        // This function is not allowed to access the activated context due to it can be called
-        // before context activation.
-        unsafe { drop(Arc::from_raw(self.thread.load(Ordering::Relaxed))) };
+        panic!("dropping Context can cause a bug so it is not supported");
     }
 }

View File

@@ -1,5 +1,4 @@
 use crate::context::Context;
-use crate::proc::Thread;
 use alloc::rc::Rc;
 use alloc::sync::Arc;
 use core::cell::UnsafeCell;
@@ -19,6 +18,8 @@ impl<T> Mutex<T> {
     /// See `mtx_init` on the PS4 for a reference.
     pub fn new(data: T) -> Self {
+        // This function is not allowed to access the CPU context because it can be called before
+        // the context has been activated.
         Self {
             data: UnsafeCell::new(data),
             owning: AtomicUsize::new(Self::OWNER_NONE),
@@ -52,36 +53,26 @@ impl<T> Mutex<T> {
         td.active_mutexes().fetch_add(1, Ordering::Relaxed);

         MutexGuard {
-            mtx: self,
-            td,
+            data: self.data.get(),
+            lock: &self.owning,
             phantom: PhantomData,
         }
     }
-}

-unsafe impl<T: Send> Send for Mutex<T> {}
-unsafe impl<T: Send> Sync for Mutex<T> {}
-
-/// An RAII implementation of a "scoped lock" of a mutex. When this structure is dropped (falls out
-/// of scope), the lock will be unlocked.
-pub struct MutexGuard<'a, T> {
-    mtx: &'a Mutex<T>,
-    td: Arc<Thread>,
-    phantom: PhantomData<Rc<()>>, // For !Send and !Sync.
-}
-
-impl<'a, T> Drop for MutexGuard<'a, T> {
-    fn drop(&mut self) {
-        // This method basically an implementation of _mtx_unlock_flags.
-        self.td.active_mutexes().fetch_sub(1, Ordering::Relaxed);
+    /// See `_mtx_unlock_flags` on the PS4 for a reference.
+    ///
+    /// # Safety
+    /// Must be called by the thread that owns `lock`.
+    unsafe fn unlock(lock: &AtomicUsize) {
+        let td = Context::thread();
+
+        td.active_mutexes().fetch_sub(1, Ordering::Relaxed);

         // TODO: There is a check for (m->lock_object).lo_data == 0 on the PS4.
-        if self
-            .mtx
-            .owning
-            .compare_exchange(
-                Arc::as_ptr(&self.td) as usize,
-                Mutex::<T>::OWNER_NONE,
+        if lock
+            .compare_exchange(
+                Arc::as_ptr(&td) as usize,
+                Self::OWNER_NONE,
                 Ordering::Release,
                 Ordering::Relaxed,
             )
@@ -92,18 +83,88 @@ impl<'a, T> Drop for MutexGuard<'a, T> {
     }
 }

+unsafe impl<T: Send> Send for Mutex<T> {}
+unsafe impl<T: Send> Sync for Mutex<T> {}
+
+/// An RAII implementation of a "scoped lock" of a mutex. When this structure is dropped (falls out
+/// of scope), the lock will be unlocked.
+///
+/// This struct must not implement [`Send`].
+pub struct MutexGuard<'a, T> {
+    data: *mut T,
+    lock: *const AtomicUsize,
+    phantom: PhantomData<&'a Mutex<T>>,
+}
+
+impl<'a, T> MutexGuard<'a, T> {
+    pub fn map<O, F>(this: Self, f: F) -> MappedMutex<'a, O>
+    where
+        F: FnOnce(&'a mut T) -> O + 'a,
+    {
+        let data = unsafe { f(&mut *this.data) };
+        let lock = this.lock;
+
+        core::mem::forget(this);
+
+        MappedMutex {
+            data,
+            lock,
+            phantom: PhantomData,
+        }
+    }
+}
+
+impl<'a, T> Drop for MutexGuard<'a, T> {
+    fn drop(&mut self) {
+        // SAFETY: This struct does not implement Send.
+        unsafe { Mutex::<T>::unlock(&*self.lock) };
+    }
+}
+
 impl<'a, T> Deref for MutexGuard<'a, T> {
     type Target = T;

     fn deref(&self) -> &Self::Target {
-        unsafe { &*self.mtx.data.get() }
+        unsafe { &*self.data }
     }
 }

 impl<'a, T> DerefMut for MutexGuard<'a, T> {
     fn deref_mut(&mut self) -> &mut Self::Target {
-        unsafe { &mut *self.mtx.data.get() }
+        unsafe { &mut *self.data }
     }
 }
+
+unsafe impl<'a, T: Sync> Sync for MutexGuard<'a, T> {}
+
+/// An RAII mutex guard returned by [`MutexGuard::map()`].
+///
+/// This struct must not implement [`Send`].
+pub struct MappedMutex<'a, T> {
+    data: T,
+    lock: *const AtomicUsize,
+    phantom: PhantomData<&'a Mutex<T>>,
+}
+
+impl<'a, T> Drop for MappedMutex<'a, T> {
+    fn drop(&mut self) {
+        // SAFETY: This struct does not implement Send.
+        unsafe { Mutex::<T>::unlock(&*self.lock) };
+    }
+}
+
+impl<'a, T> Deref for MappedMutex<'a, T> {
+    type Target = T;

+    fn deref(&self) -> &Self::Target {
+        &self.data
+    }
+}
+
+impl<'a, T> DerefMut for MappedMutex<'a, T> {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        &mut self.data
+    }
+}
+
+unsafe impl<'a, T: Sync> Sync for MappedMutex<'a, T> {}
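
The free-standing `unlock()` keeps the old drop logic but takes the owner check with it: release is a `compare_exchange` from the current thread's address to `OWNER_NONE`, so an unlock by a non-owner fails loudly instead of corrupting the lock state. A minimal sketch of that release protocol, using std atomics and plain integer thread ids in place of `Thread` pointers:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Stand-in for Mutex::OWNER_NONE; owners are plain integers here.
const OWNER_NONE: usize = 0;

// Release the lock only if `me` is the recorded owner, mirroring unlock().
fn unlock(lock: &AtomicUsize, me: usize) {
    if lock
        .compare_exchange(me, OWNER_NONE, Ordering::Release, Ordering::Relaxed)
        .is_err()
    {
        panic!("attempt to unlock a mutex we don't own");
    }
}

fn main() {
    let lock = AtomicUsize::new(OWNER_NONE);

    // "Thread 1" acquires the lock by installing its id as the owner.
    lock.compare_exchange(OWNER_NONE, 1, Ordering::Acquire, Ordering::Relaxed)
        .expect("lock was free");

    unlock(&lock, 1); // succeeds: we are the owner
    assert_eq!(lock.load(Ordering::Relaxed), OWNER_NONE);
    // unlock(&lock, 2) at this point would panic: nobody owns the lock now.
}
```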
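
`MutexGuard::map()` works because the unlock responsibility lives entirely in `Drop`: the mapped guard copies the raw `lock` pointer, and `core::mem::forget()` disarms the original guard so exactly one unlock still happens. A minimal standalone sketch of the same hand-off, with a hypothetical `Guard` over an `AtomicBool` flag instead of the kernel's `Mutex`:

```rust
use std::mem::forget;
use std::sync::atomic::{AtomicBool, Ordering};

// A guard that must release `lock` exactly once when it goes out of scope.
struct Guard<'a> {
    lock: &'a AtomicBool,
    data: *mut i32,
}

// The result of mapping a guard: it inherits the unlock duty.
struct MappedGuard<'a, T> {
    lock: &'a AtomicBool,
    data: T,
}

impl<'a> Guard<'a> {
    fn map<T>(this: Self, f: impl FnOnce(&'a mut i32) -> T) -> MappedGuard<'a, T> {
        // SAFETY: the guard proves exclusive access for its whole lifetime.
        let data = f(unsafe { &mut *this.data });
        let lock = this.lock;

        forget(this); // disarm the original guard: no double unlock

        MappedGuard { lock, data }
    }
}

impl Drop for Guard<'_> {
    fn drop(&mut self) {
        self.lock.store(false, Ordering::Release);
    }
}

impl<T> Drop for MappedGuard<'_, T> {
    fn drop(&mut self) {
        self.lock.store(false, Ordering::Release);
    }
}

fn main() {
    let lock = AtomicBool::new(false);
    let mut value = 42;

    assert!(!lock.swap(true, Ordering::Acquire)); // acquire the flag
    let guard = Guard { lock: &lock, data: &mut value };

    // Map the guard to a view of its data; the lock stays held.
    let view = Guard::map(guard, |v| &*v);
    assert_eq!(*view.data, 42);

    drop(view); // the single unlock happens here
    assert!(!lock.load(Ordering::Relaxed));
}
```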

View File

@@ -3,9 +3,8 @@
 use crate::context::Context;
 use crate::malloc::KernelHeap;
-use crate::proc::Thread;
+use crate::proc::{ProcMgr, Thread};
 use alloc::sync::Arc;
-use core::arch::asm;
 use core::mem::zeroed;
 use core::panic::PanicInfo;
 use obconf::{BootEnv, Config};
@@ -37,7 +36,7 @@ extern crate alloc;
 #[allow(dead_code)]
 #[cfg_attr(target_os = "none", no_mangle)]
 extern "C" fn _start(env: &'static BootEnv, conf: &'static Config) -> ! {
-    // SAFETY: This is safe because we called it as the first thing here.
+    // SAFETY: This function has a lot of restrictions. See Context documentation for more details.
     unsafe { crate::config::setup(env, conf) };

     info!("Starting Obliteration Kernel.");
@@ -45,33 +44,36 @@ extern "C" fn _start(env: &'static BootEnv, conf: &'static Config) -> ! {
     // Setup thread0 to represent this thread.
     let thread0 = unsafe { Thread::new_bare() };

-    // Setup CPU context. We use a different mechanism here. The PS4 put all of pcpu at a global
+    // Initialize foundations.
+    let pmgr = ProcMgr::new();
+
+    // Activate CPU context. We use a different mechanism here. The PS4 puts all of pcpu at a global
     // level but we put it on each CPU stack instead.
     let thread0 = Arc::new(thread0);
-    let mut cx = Context::new(0, thread0);
+    let mut cx = unsafe { Context::new(0, thread0, pmgr.clone()) };

     // SAFETY: We are in the main CPU entry point and we move all the remaining code after this into
     // a dedicated no-return function.
     unsafe { cx.activate() };

-    main();
+    main(pmgr);
 }

-fn main() -> ! {
+fn main(pmgr: Arc<ProcMgr>) -> ! {
     // Activate stage 2 heap.
     info!("Activating stage 2 heap.");
     unsafe { KERNEL_HEAP.activate_stage2() };

+    // See scheduler() function on the PS4 for a reference.
+    // TODO: Subscribe to "system_suspend_phase2_pre_sync" and "system_resume_phase2" events.
     loop {
-        #[cfg(target_arch = "x86_64")]
-        unsafe {
-            asm!("hlt")
-        };
-
-        #[cfg(target_arch = "aarch64")]
-        unsafe {
-            asm!("wfi")
-        };
+        // TODO: Implement a call to vm_page_count_min().
+        let procs = pmgr.procs();
+
+        if procs.len() == 0 {
+            todo!();
+        }
+
+        todo!();
     }
 }
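
Moving everything after `cx.activate()` into the diverging `main()` is what satisfies the `Context` drop rules: a function that never returns can never drop the entry point's locals after activation. A minimal sketch of the pattern with hypothetical function names:

```rust
// Hypothetical names; the kernel's _start()/main() split follows this shape.
fn main() {
    setup();
}

fn setup() -> ! {
    // Everything created before the hand-off is either moved into run() or
    // must be safe to drop here, before "activation".
    let state = String::from("moved, never dropped in this frame");
    run(state) // diverges: this frame never resumes, nothing here drops later
}

fn run(state: String) -> ! {
    println!("running with: {state}");
    // A real kernel would loop forever; exit keeps the example terminating.
    std::process::exit(0)
}
```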

View File

@@ -1,3 +1,30 @@
+use crate::lock::{MappedMutex, Mutex, MutexGuard};
+use alloc::sync::{Arc, Weak};
+use hashbrown::HashMap;
+
+pub use self::pid::*;
+pub use self::process::*;
 pub use self::thread::*;

+mod pid;
+mod process;
 mod thread;
+
+/// Manage all processes in the system.
+pub struct ProcMgr {
+    procs: Mutex<HashMap<Pid, Weak<Proc>>>, // allproc + pidhashtbl + zombproc
+}
+
+impl ProcMgr {
+    pub fn new() -> Arc<Self> {
+        // This function is not allowed to access the CPU context because it can be called before
+        // the context has been activated.
+        Arc::new(Self {
+            procs: Mutex::new(HashMap::new()),
+        })
+    }
+
+    pub fn procs(&self) -> MappedMutex<impl ExactSizeIterator<Item = &Weak<Proc>> + '_> {
+        MutexGuard::map(self.procs.lock(), |procs| procs.values())
+    }
+}
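
Note that `ProcMgr` stores `Weak<Proc>` rather than `Arc<Proc>`, so the table itself never keeps a terminated process alive; callers must upgrade an entry before using it. A minimal sketch of that design choice, using std's `Mutex`/`HashMap` and a hypothetical `Proc` in place of the kernel types:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex, Weak};

// Hypothetical stand-in for the kernel's Proc.
struct Proc {
    pid: i32,
}

// Hypothetical manager mirroring ProcMgr's table of weak references.
struct ProcMgr {
    procs: Mutex<HashMap<i32, Weak<Proc>>>,
}

impl ProcMgr {
    fn new() -> Arc<Self> {
        Arc::new(Self {
            procs: Mutex::new(HashMap::new()),
        })
    }
}

fn main() {
    let mgr = ProcMgr::new();
    let p = Arc::new(Proc { pid: 1 });

    // Register the process; the table holds only a Weak reference.
    mgr.procs
        .lock()
        .unwrap()
        .insert(p.pid, Arc::downgrade(&p));

    // While the process is alive, the entry upgrades to a usable Arc.
    let table = mgr.procs.lock().unwrap();
    assert!(table.get(&1).unwrap().upgrade().is_some());
    drop(table);

    // Once the last strong reference is gone, the manager no longer keeps
    // the process alive; the stale entry simply fails to upgrade.
    drop(p);
    let table = mgr.procs.lock().unwrap();
    assert!(table.get(&1).unwrap().upgrade().is_none());
}
```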

View File

@@ -0,0 +1,39 @@
+use core::borrow::Borrow;
+use core::ffi::c_int;
+
+/// Unique identifier of a process.
+#[repr(transparent)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub struct Pid(c_int);
+
+impl Pid {
+    pub const KERNEL: Self = Self(0);
+    pub const IDLE: Self = Self(10);
+
+    /// Returns [`None`] if `v` is negative.
+    pub const fn new(v: c_int) -> Option<Self> {
+        if v >= 0 {
+            Some(Self(v))
+        } else {
+            None
+        }
+    }
+}
+
+impl Borrow<c_int> for Pid {
+    fn borrow(&self) -> &c_int {
+        &self.0
+    }
+}
+
+impl PartialEq<c_int> for Pid {
+    fn eq(&self, other: &c_int) -> bool {
+        self.0 == *other
+    }
+}
+
+impl PartialEq<Pid> for c_int {
+    fn eq(&self, other: &Pid) -> bool {
+        *self == other.0
+    }
+}
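
The `Borrow<c_int>` impl is what lets a `HashMap` keyed by `Pid` (like `ProcMgr::procs`) be queried with a raw `c_int` coming straight from a syscall, without constructing a `Pid` first. A minimal sketch demonstrating the lookup, assuming std's `HashMap` in place of `hashbrown`:

```rust
use std::borrow::Borrow;
use std::collections::HashMap;
use std::ffi::c_int;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct Pid(c_int);

// HashMap::get() accepts any type the key can be borrowed as, provided Hash
// and Eq agree between Pid and c_int (they do: both hash the inner integer).
impl Borrow<c_int> for Pid {
    fn borrow(&self) -> &c_int {
        &self.0
    }
}

fn main() {
    let mut table: HashMap<Pid, &str> = HashMap::new();
    table.insert(Pid(0), "kernel");
    table.insert(Pid(10), "idle");

    // Look up straight from a raw integer; no Pid value is built here.
    let raw: c_int = 10;
    assert_eq!(table.get(&raw), Some(&"idle"));
}
```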

View File

@@ -0,0 +1,2 @@
+/// Implementation of `proc` structure.
+pub struct Proc {}