Initializes malloc stats (#973)

This commit is contained in:
Putta Khunchalee 2024-09-08 20:37:20 +07:00 committed by GitHub
parent a4148c67a9
commit 637fd80913
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 160 additions and 34 deletions

View File

@ -1,11 +1,19 @@
#![no_std]
use core::num::NonZero;
pub use self::env::*;
mod env;
/// Information about the boot environment.
/// Contains information about the boot environment.
#[repr(C)]
pub enum BootEnv {
Vm(Vm),
}
/// Runtime configurations for the kernel.
///
/// Passed to the kernel by the bootloader/hypervisor via `config::setup()`.
#[repr(C)]
pub struct Config {
// NOTE(review): presumably the number of CPUs the kernel will use; per-CPU data (e.g. malloc
// stats) is sized from this — confirm against callers of config().
pub max_cpu: NonZero<usize>,
}

View File

@ -1,30 +1,33 @@
use core::ptr::null;
use macros::elf_note;
use obconf::BootEnv;
use obconf::{BootEnv, Config};
#[cfg(target_arch = "aarch64")]
pub use self::aarch64::*;
#[cfg(target_arch = "x86_64")]
pub use self::x86_64::*;
pub use self::arch::*;
#[cfg(target_arch = "aarch64")]
mod aarch64;
#[cfg(target_arch = "x86_64")]
mod x86_64;
#[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
#[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
mod arch;
pub fn boot_env() -> &'static BootEnv {
// SAFETY: This is safe because the set_boot_env() requirements.
// SAFETY: This is safe because of the setup() requirements.
unsafe { &*BOOT_ENV }
}
pub fn config() -> &'static Config {
// SAFETY: This is safe because of the setup() requirements.
unsafe { &*CONFIG }
}
/// # Safety
/// This function must be called immediately in the kernel entry point. After that it must never
/// be called again.
pub unsafe fn set_boot_env(env: &'static BootEnv) {
pub unsafe fn setup(env: &'static BootEnv, conf: &'static Config) {
BOOT_ENV = env;
CONFIG = conf;
}
static mut BOOT_ENV: *const BootEnv = null();
static mut CONFIG: *const Config = null();
#[elf_note(section = ".note.obkrnl.page-size", name = "obkrnl", ty = 0)]
static NOTE_PAGE_SIZE: [u8; size_of::<usize>()] = PAGE_SIZE.to_ne_bytes();

View File

@ -8,3 +8,7 @@ pub unsafe fn activate(_: *mut Context) {
pub unsafe fn thread() -> *const Thread {
todo!();
}
/// Returns a pointer to the current CPU's [`Context`] (presumably mirroring the x86_64
/// implementation — TODO confirm once implemented).
///
/// # Safety
/// Unimplemented stub for this architecture: always panics via `todo!()`.
pub unsafe fn current() -> *const Context {
todo!();
}

View File

@ -12,20 +12,16 @@ mod arch;
not safe to have a temporary pointer or reference to this struct or its fields because the CPU
might get interrupted, which means it is possible for the next instruction to get executed on
a different CPU if the interrupt causes the CPU to switch the task.
///
We don't support the `pc_cpuid` field here because its value is 100% unpredictable due to the
above reason. Once we have loaded `pc_cpuid` the next instruction might get executed on a
different CPU, which renders the loaded value incorrect. The only way to prevent this issue is
to disable interrupts before reading `pc_cpuid`, which can make the CPU miss some events from
other hardware.
pub struct Context {
cpu: usize, // pc_cpuid
thread: AtomicPtr<Thread>, // pc_curthread
}
impl Context {
/// See `pcpu_init` on the PS4 for a reference.
pub fn new(td: Arc<Thread>) -> Self {
pub fn new(cpu: usize, td: Arc<Thread>) -> Self {
Self {
cpu,
thread: AtomicPtr::new(Arc::into_raw(td).cast_mut()),
}
}
@ -35,10 +31,28 @@ impl Context {
// it is going to be the same one since it represents the current thread.
let td = unsafe { self::arch::thread() };
// We cannot return a reference here because that requires a 'static lifetime, which would
// allow the caller to store it at a global level. Once the thread is destroyed that reference
// would be invalid.
unsafe { Arc::increment_strong_count(td) };
unsafe { Arc::from_raw(td) }
}
/// See `critical_enter` and `critical_exit` on the PS4 for a reference.
pub fn pin() -> PinnedContext {
// TODO: Verify if memory ordering here is correct. We need the call to self::arch::current()
// to execute after the thread has entered the critical section. The CPU must not reorder this.
// Our current implementation follows how Drop on Arc is implemented.
let td = unsafe { self::arch::thread() };
unsafe { (*td).critical_sections().fetch_add(1, Ordering::Release) };
core::sync::atomic::fence(Ordering::Acquire);
// Once the thread is in a critical section it will never be switched to another CPU, so it is
// safe to keep a pointer to the context here.
PinnedContext(unsafe { self::arch::current() })
}
/// # Safety
/// The only place this method is safe to call is in the CPU entry point. Once this method
/// returns, this instance must outlive the CPU lifetime and it must never be accessed via this
@ -54,3 +68,26 @@ impl Drop for Context {
unsafe { drop(Arc::from_raw(self.thread.load(Ordering::Relaxed))) };
}
}
/// RAII struct to pin the current thread to the current CPU.
///
/// This struct must not implement [`Send`] and [`Sync`]. Currently it stores a raw pointer,
/// which makes it `!Send` and `!Sync` automatically.
pub struct PinnedContext(*const Context);
impl PinnedContext {
/// Returns the index of the CPU this pinned context belongs to.
pub fn cpu(&self) -> usize {
// SAFETY: NOTE(review): presumably the pointer stays valid while the thread is pinned
// (it was obtained inside a critical section by Context::pin) — confirm the Context
// outlives the pin.
unsafe { (*self.0).cpu }
}
}
impl Drop for PinnedContext {
/// Leaves the critical section entered by `Context::pin`, decrementing the thread's
/// `critical_sections` counter so the thread may be switched to another CPU again.
fn drop(&mut self) {
// TODO: Verify if memory ordering here is correct.
let td = unsafe { (*self.0).thread.load(Ordering::Relaxed) };
unsafe { (*td).critical_sections().fetch_sub(1, Ordering::Release) };
// TODO: Implement td_owepreempt.
}
}

View File

@ -15,7 +15,7 @@ pub unsafe fn activate(cx: *mut Context) {
in("ecx") 0xc0000101u32,
in("edx") cx >> 32,
in("eax") cx,
options(preserves_flags, nostack)
options(nomem, preserves_flags, nostack)
);
// Clear FS and GS for user mode.
@ -24,7 +24,7 @@ pub unsafe fn activate(cx: *mut Context) {
in("ecx") 0xc0000100u32,
in("edx") 0,
in("eax") 0,
options(preserves_flags, nostack)
options(nomem, preserves_flags, nostack)
);
asm!(
@ -32,7 +32,7 @@ pub unsafe fn activate(cx: *mut Context) {
in("ecx") 0xc0000102u32,
in("edx") 0,
in("eax") 0,
options(preserves_flags, nostack)
options(nomem, preserves_flags, nostack)
);
}
@ -50,3 +50,23 @@ pub unsafe fn thread() -> *const Thread {
td
}
/// Returns a pointer to the [`Context`] of the current CPU.
///
/// # Safety
/// NOTE(review): presumably only meaningful while the thread cannot migrate to another CPU
/// (e.g. inside a critical section) — confirm against callers such as `Context::pin`.
pub unsafe fn current() -> *const Context {
// Load current GS. MSR 0xC0000101 is the GS base, which activate() set to the Context
// pointer for this CPU.
let mut edx: u32;
let mut eax: u32;
asm!(
"rdmsr",
in("ecx") 0xc0000101u32,
out("edx") edx,
out("eax") eax,
// NOTE(review): `pure` allows the compiler to cache or merge this read; verify that is
// sound given the result differs per CPU.
options(pure, nomem, preserves_flags, nostack)
);
// Combine EDX and EAX. rdmsr returns the upper 32 bits in EDX and the lower 32 bits in EAX.
let edx = edx as usize;
let eax = eax as usize;
((edx << 32) | eax) as *const Context
}

View File

@ -1,15 +1,15 @@
#![no_std]
#![cfg_attr(not(test), no_main)]
use crate::config::set_boot_env;
use crate::context::Context;
use crate::malloc::KernelHeap;
use crate::proc::Thread;
use alloc::sync::Arc;
use core::arch::asm;
use core::mem::zeroed;
use core::num::NonZero;
use core::panic::PanicInfo;
use obconf::BootEnv;
use obconf::{BootEnv, Config};
mod config;
mod console;
@ -37,8 +37,13 @@ extern crate alloc;
#[allow(dead_code)]
#[cfg_attr(target_os = "none", no_mangle)]
extern "C" fn _start(env: &'static BootEnv) -> ! {
// TODO: Accept config from bootloader/hypervisor.
static CONFIG: Config = Config {
max_cpu: unsafe { NonZero::new_unchecked(1) },
};
// SAFETY: This is safe because we called it as the first thing here.
unsafe { set_boot_env(env) };
unsafe { crate::config::setup(env, &CONFIG) };
info!("Starting Obliteration Kernel.");
@ -48,7 +53,7 @@ extern "C" fn _start(env: &'static BootEnv) -> ! {
// Setup CPU context. We use a different mechanism here. The PS4 put all of pcpu at a global
// level but we put it on each CPU stack instead.
let thread0 = Arc::new(thread0);
let mut cx = Context::new(thread0);
let mut cx = Context::new(0, thread0);
// SAFETY: We are in the main CPU entry point and we move all the remaining code after this into
// a dedicated no-return function.

View File

@ -1,10 +1,11 @@
use crate::config::PAGE_SIZE;
use crate::config::{config, PAGE_SIZE};
use crate::context::Context;
use crate::uma::UmaZone;
use alloc::string::ToString;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::alloc::Layout;
use core::sync::atomic::{AtomicU64, Ordering};
/// Stage 2 kernel heap.
///
@ -12,6 +13,7 @@ use core::alloc::Layout;
/// `malloc_type` and `malloc_type_internal` structure.
pub struct Stage2 {
zones: [Vec<Arc<UmaZone>>; (usize::BITS - 1) as usize], // kmemsize + kmemzones
stats: Vec<Stats>, // mti_stats
}
impl Stage2 {
@ -54,7 +56,14 @@ impl Stage2 {
zones
});
Self { zones }
// TODO: Is there a better way than this?
let mut stats = Vec::with_capacity(config().max_cpu.get());
for _ in 0..config().max_cpu.get() {
stats.push(Stats::default());
}
Self { zones, stats }
}
/// See `malloc` on the PS4 for a reference.
@ -82,8 +91,24 @@ impl Stage2 {
size
};
// TODO: There are more logic after this on the PS4.
self.zones[align][size >> Self::KMEM_ZSHIFT].alloc()
// Allocate a memory from UMA zone.
let zone = &self.zones[align][size >> Self::KMEM_ZSHIFT];
let mem = zone.alloc();
// Update stats.
let cx = Context::pin();
let stats = &self.stats[cx.cpu()];
let size = if mem.is_null() { 0 } else { zone.size() };
if size != 0 {
stats
.alloc_bytes
.fetch_add(size.try_into().unwrap(), Ordering::Relaxed);
stats.alloc_count.fetch_add(1, Ordering::Relaxed);
}
// TODO: How to update mts_size here since our zone table also indexed by alignment?
mem
} else {
todo!()
}
@ -96,3 +121,10 @@ impl Stage2 {
todo!()
}
}
/// Implementation of `malloc_type_stats` structure.
#[derive(Default)]
struct Stats {
alloc_bytes: AtomicU64, // mts_memalloced
alloc_count: AtomicU64, // mts_numallocs
}

View File

@ -1,9 +1,12 @@
use core::sync::atomic::AtomicU32;
/// Implementation of `thread` structure.
///
/// All threads **must** run to completion once execution has been started; otherwise resources
/// will be leaked if the thread is dropped while its execution is currently in kernel space.
pub struct Thread {
active_interrupts: usize, // td_intr_nesting_level
critical_sections: AtomicU32, // td_critnest
active_interrupts: usize, // td_intr_nesting_level
}
impl Thread {
@ -12,11 +15,18 @@ impl Thread {
/// responsibility to configure the thread after this so it has proper states and triggers the
/// necessary events.
pub unsafe fn new_bare() -> Self {
// td_critnest on the PS4 started with 1 but this does not work in our case because we use
// RAII to increase and decrease it.
Self {
critical_sections: AtomicU32::new(0),
active_interrupts: 0,
}
}
/// Returns the nested critical-section counter (`td_critnest` on the PS4). Incremented and
/// decremented via RAII by `Context::pin`.
pub fn critical_sections(&self) -> &AtomicU32 {
&self.critical_sections
}
/// Returns the interrupt nesting level (`td_intr_nesting_level` on the PS4).
pub fn active_interrupts(&self) -> usize {
self.active_interrupts
}

View File

@ -1,12 +1,19 @@
use alloc::borrow::Cow;
/// Implementation of `uma_zone` structure.
pub struct UmaZone {}
pub struct UmaZone {
size: usize, // uz_size
}
impl UmaZone {
/// See `uma_zcreate` on the PS4 for a reference.
pub fn new(_: Cow<'static, str>, _: usize, _: usize) -> Self {
Self {}
pub fn new(_: Cow<'static, str>, size: usize, _: usize) -> Self {
// TODO: Check if size is allowed to be zero. If not, change it to NonZero<usize>.
Self { size }
}
pub fn size(&self) -> usize {
self.size
}
/// See `uma_zalloc_arg` on the PS4 for a reference.