Moves UmaZone behind Uma (#1111)

Putta Khunchalee 2024-11-18 02:37:09 +07:00 committed by GitHub
parent a4c79a9c9c
commit 44978321bf
15 changed files with 255 additions and 260 deletions

.vscode/settings.json vendored

@ -1,10 +1,21 @@
{
"[json]": {
"editor.formatOnSave": true
},
"[rust]": {
"editor.defaultFormatter": "rust-lang.rust-analyzer",
"editor.formatOnSave": true,
"editor.rulers": [
100
]
},
"[slint]": {
"editor.defaultFormatter": "Slint.slint",
"editor.formatOnSave": true
},
"[toml]": {
"editor.formatOnSave": true
},
"clangd.arguments": [
"--header-insertion=never"
],

Cargo.lock generated

@ -3153,7 +3153,6 @@ dependencies = [
"hashbrown",
"macros",
"obconf",
"spin",
"talc",
"x86-64",
]


@ -9,7 +9,6 @@ bitfield-struct = "0.9.2"
hashbrown = "0.14.5"
macros = { path = "../macros" }
obconf = { path = "../src/obconf", features = ["virt"] }
spin = { version = "0.9.8", features = ["spin_mutex"], default-features = false }
talc = { version = "4.4.1", default-features = false }
[target.'cfg(target_arch = "x86_64")'.dependencies]


@ -24,12 +24,16 @@ mod local;
/// - This function can be called only once per CPU.
/// - `cpu` must be unique and valid.
/// - `pmgr` must be the same for all contexts.
pub unsafe fn run_with_context(
///
/// # Panics
/// If `f` returns. The reason we don't use `!` as the return type of `F` is that it requires
/// nightly Rust.
pub unsafe fn run_with_context<R, F: FnOnce() -> R>(
cpu: usize,
td: Arc<Thread>,
pmgr: Arc<ProcMgr>,
args: ContextArgs,
f: fn() -> !,
f: F,
) -> ! {
// We use a different mechanism here. The PS4 puts all of the pcpu at the global level but we put
// it on each CPU's stack instead.
@ -48,6 +52,8 @@ pub unsafe fn run_with_context(
core::sync::atomic::fence(Ordering::AcqRel);
f();
panic!("return from a function passed to run_with_context() is not supported");
}
/// # Interrupt safety
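For reference, a minimal sketch of the new call shape (it mirrors the call site in kernel/src/main.rs later in this diff; the closure never returns because main() diverges):

    // The closure moves `uma` into the new context. Since `F: FnOnce() -> R`
    // can technically return, run_with_context() panics if it ever does.
    self::context::run_with_context(0, thread0, pmgr, cx, move || main(uma));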


@ -3,14 +3,14 @@ use core::fmt::{Display, Formatter};
use core::ops::{Deref, DerefMut};
/// RAII structure used to release the exclusive write access of a lock when dropped.
pub struct GutexWriteGuard<'a, T> {
#[allow(dead_code)] // active and value is protected by this lock.
pub struct GutexWrite<'a, T> {
#[allow(dead_code)] // active and value fields are protected by this lock.
lock: GroupGuard<'a>,
active: *mut usize,
value: *mut T,
}
impl<'a, T> GutexWriteGuard<'a, T> {
impl<'a, T> GutexWrite<'a, T> {
/// # Safety
/// `active` and `value` must be protected by `lock`.
pub(super) unsafe fn new(lock: GroupGuard<'a>, active: *mut usize, value: *mut T) -> Self {
@ -22,13 +22,13 @@ impl<'a, T> GutexWriteGuard<'a, T> {
}
}
impl<'a, T> Drop for GutexWriteGuard<'a, T> {
impl<'a, T> Drop for GutexWrite<'a, T> {
fn drop(&mut self) {
unsafe { *self.active = 0 };
}
}
impl<'a, T> Deref for GutexWriteGuard<'a, T> {
impl<'a, T> Deref for GutexWrite<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
@ -36,16 +36,16 @@ impl<'a, T> Deref for GutexWriteGuard<'a, T> {
}
}
impl<'a, T> DerefMut for GutexWriteGuard<'a, T> {
impl<'a, T> DerefMut for GutexWrite<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.value }
}
}
impl<'a, T: Display> Display for GutexWriteGuard<'a, T> {
impl<'a, T: Display> Display for GutexWrite<'a, T> {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
self.deref().fmt(f)
}
}
unsafe impl<'a, T: Sync> Sync for GutexWriteGuard<'a, T> {}
unsafe impl<'a, T: Sync> Sync for GutexWrite<'a, T> {}


@ -57,7 +57,7 @@ pub struct Gutex<T> {
impl<T> Gutex<T> {
/// # Panics
/// If there is an active reader or writer.
pub fn write(&self) -> GutexWriteGuard<T> {
pub fn write(&self) -> GutexWrite<T> {
// Check if there is an active reader or writer.
let lock = self.group.lock();
let active = self.active.get();
@ -72,7 +72,7 @@ impl<T> Gutex<T> {
*active = usize::MAX;
GutexWriteGuard::new(lock, active, self.value.get())
GutexWrite::new(lock, active, self.value.get())
}
}
}
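A short hypothetical usage sketch of the renamed guard (the counter and its group are made up; spawn() and write() are used the same way the UmaZone constructor shown further down uses them):

    // A Gutex is spawned from a GutexGroup; write() returns a GutexWrite
    // guard that clears the active marker when it is dropped.
    let gg = GutexGroup::new();
    let counter: Gutex<u64> = gg.spawn(0);
    *counter.write() += 1; // panics if a reader or writer is already active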


@ -18,7 +18,7 @@ impl<T> Mutex<T> {
///
/// # Context safety
/// This function does not require a CPU context.
pub fn new(data: T) -> Self {
pub const fn new(data: T) -> Self {
Self {
data: UnsafeCell::new(data),
owning: AtomicUsize::new(MTX_UNOWNED),
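Making the constructor const also lets the lock be built in a static initializer; a hypothetical example (not something this diff does, and it assumes this Mutex type is Sync):

    // Hypothetical: const construction allows a Mutex inside a static.
    static BOOT_COUNTER: Mutex<u64> = Mutex::new(0);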


@ -1,11 +1,13 @@
#![no_std]
#![cfg_attr(not(test), no_main)]
use crate::context::current_procmgr;
use crate::imgact::Ps4Abi;
use crate::malloc::KernelHeap;
use crate::proc::{Fork, Proc, ProcAbi, ProcMgr, Thread};
use crate::sched::sleep;
use self::context::current_procmgr;
use self::imgact::Ps4Abi;
use self::malloc::{KernelHeap, Stage2};
use self::proc::{Fork, Proc, ProcAbi, ProcMgr, Thread};
use self::sched::sleep;
use self::uma::Uma;
use alloc::boxed::Box;
use alloc::sync::Arc;
use core::mem::zeroed;
use core::panic::PanicInfo;
@ -64,19 +66,20 @@ unsafe extern "C" fn _start(env: &'static BootEnv, conf: &'static Config) -> ! {
let thread0 = Thread::new_bare(proc0);
// Initialize foundations.
let uma = Uma::new();
let pmgr = ProcMgr::new();
// Activate CPU context.
let thread0 = Arc::new(thread0);
self::context::run_with_context(0, thread0, pmgr, cx, main);
self::context::run_with_context(0, thread0, pmgr, cx, move || main(uma));
}
fn main() -> ! {
fn main(mut uma: Uma) -> ! {
// Activate stage 2 heap.
info!("Activating stage 2 heap.");
unsafe { KERNEL_HEAP.activate_stage2() };
unsafe { KERNEL_HEAP.activate_stage2(Box::new(Stage2::new(&mut uma))) };
// Run sysinit vector. The PS4 uses the linker to put all sysinit functions in a list then loops
// over the list to execute all of them. We manually execute those functions instead for readability. This


@ -1,11 +1,12 @@
use self::stage1::Stage1;
use self::stage2::Stage2;
pub use self::stage2::Stage2;
use crate::lock::Mutex;
use alloc::boxed::Box;
use core::alloc::{GlobalAlloc, Layout};
use core::ptr::null_mut;
use core::sync::atomic::{AtomicPtr, Ordering};
use core::cell::{RefCell, UnsafeCell};
use core::hint::unreachable_unchecked;
use core::ptr::{null_mut, NonNull};
use talc::{ClaimOnOom, Span, Talc};
mod stage1;
mod stage2;
/// Implementation of [`GlobalAlloc`] for objects belonging to kernel space.
@ -13,9 +14,12 @@ mod stage2;
This allocator has 2 stages. The first stage will allocate memory from a static buffer (AKA
arena). This stage will be primarily used for bootstrapping the kernel. The second stage will be
activated once the required subsystems have been initialized.
///
/// The first stage is **not** thread safe so stage 2 must be activated before starting a new CPU.
pub struct KernelHeap {
stage1: Stage1,
stage2: AtomicPtr<Stage2>,
stage: UnsafeCell<Stage>,
stage1_ptr: *const u8,
stage1_end: *const u8,
}
impl KernelHeap {
@ -23,37 +27,29 @@ impl KernelHeap {
/// The specified memory must be valid for reads and writes and it must be exclusively available
/// to [`KernelHeap`].
pub const unsafe fn new<const L: usize>(stage1: *mut [u8; L]) -> Self {
let stage1_ptr = stage1.cast();
let stage1 = Talc::new(ClaimOnOom::new(Span::from_array(stage1)));
Self {
stage1: Stage1::new(stage1),
stage2: AtomicPtr::new(null_mut()),
stage: UnsafeCell::new(Stage::One(RefCell::new(stage1))),
stage1_ptr,
stage1_end: stage1_ptr.add(L),
}
}
/// # Panics
/// If stage 2 already activated.
pub fn activate_stage2(&self) {
// Setup stage 2.
let state2 = Box::new(Stage2::new());
/// # Safety
/// This must be called by the main CPU and can be called only once.
pub unsafe fn activate_stage2(&self, stage2: Box<Stage2>) {
// What we are doing here is highly unsafe. Do not edit this code unless you know what you
// are doing!
let stage = self.stage.get();
let stage1 = match stage.read() {
Stage::One(v) => Mutex::new(v.into_inner()),
Stage::Two(_, _) => unreachable_unchecked(),
};
// Activate.
let state2 = Box::into_raw(state2);
assert!(self
.stage2
.compare_exchange(null_mut(), state2, Ordering::Release, Ordering::Relaxed)
.is_ok());
}
}
impl Drop for KernelHeap {
fn drop(&mut self) {
// If stage 2 has not activated yet then this function is not allowed to access the CPU
// context due to it can be called before the context has been activated.
let stage2 = self.stage2.load(Ordering::Acquire);
if !stage2.is_null() {
drop(unsafe { Box::from_raw(stage2) });
}
// Switch to stage 2 WITHOUT dropping the value contained in Stage::One.
stage.write(Stage::Two(stage2, stage1));
}
}
@ -63,29 +59,43 @@ unsafe impl GlobalAlloc for KernelHeap {
// If stage 2 has not been activated yet then this function is not allowed to access the CPU
// context because it can be called before the context has been activated.
// SAFETY: GlobalAlloc::alloc requires layout to be non-zero.
self.stage2
.load(Ordering::Acquire)
.as_ref()
.map(|stage2| stage2.alloc(layout))
.unwrap_or_else(|| self.stage1.alloc(layout))
match &*self.stage.get() {
Stage::One(s) => s
.borrow_mut()
.malloc(layout)
.map(|v| v.as_ptr())
.unwrap_or(null_mut()),
Stage::Two(s, _) => s.alloc(layout),
}
}
#[inline(never)]
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// If stage 2 has not been activated yet then this function is not allowed to access the CPU
// context because it can be called before the context has been activated.
if self.stage1.is_owner(ptr) {
// SAFETY: GlobalAlloc::dealloc required ptr to be the same one that returned from our
// GlobalAlloc::alloc and layout to be the same one that passed to it.
self.stage1.dealloc(ptr, layout);
} else {
// SAFETY: ptr is not owned by stage 1 so with the requirements of GlobalAlloc::dealloc
// the pr will be owned by stage 2 for sure.
self.stage2
.load(Ordering::Acquire)
.as_ref()
.unwrap()
.dealloc(ptr, layout);
match &*self.stage.get() {
Stage::One(s) => s.borrow_mut().free(NonNull::new_unchecked(ptr), layout),
Stage::Two(s2, s1) => {
if ptr.cast_const() >= self.stage1_ptr && ptr.cast_const() < self.stage1_end {
// SAFETY: GlobalAlloc::dealloc requires ptr to be the same one that was returned
// from our GlobalAlloc::alloc and layout to be the same one that was passed to it.
s1.lock().free(NonNull::new_unchecked(ptr), layout)
} else {
// SAFETY: ptr is not owned by stage 1 so with the requirements of
// GlobalAlloc::dealloc the ptr will be owned by stage 2 for sure.
s2.dealloc(ptr, layout);
}
}
}
}
}
// We impose a restriction on the user to activate stage 2 before going multi-threaded.
unsafe impl Send for KernelHeap {}
unsafe impl Sync for KernelHeap {}
/// Stage of [KernelHeap].
enum Stage {
One(RefCell<Talc<ClaimOnOom>>),
Two(Box<Stage2>, Mutex<Talc<ClaimOnOom>>),
}
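A hypothetical sketch of how the two stages are meant to be wired together, assuming KERNEL_HEAP is registered as the #[global_allocator] (that declaration is not part of this diff; the arena name and size are made up):

    // Stage 1: a fixed static arena, usable before any CPU context exists,
    // but only from a single thread.
    static mut STAGE1: [u8; 1024 * 1024] = [0; 1024 * 1024];

    #[global_allocator]
    static KERNEL_HEAP: KernelHeap =
        unsafe { KernelHeap::new(core::ptr::addr_of_mut!(STAGE1)) };

    // Stage 2: on the main CPU, exactly once, before any other CPU starts
    // (this is what main() in kernel/src/main.rs above now does):
    // unsafe { KERNEL_HEAP.activate_stage2(Box::new(Stage2::new(&mut uma))) };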


@ -1,57 +0,0 @@
use core::alloc::Layout;
use core::ptr::{null_mut, NonNull};
use talc::{ClaimOnOom, Span, Talc};
/// Stage 1 kernel heap.
///
/// This stage is not allowed to access the CPU context due to it can be used before the context has
/// been activated.
///
/// This stage allocate a memory from a static buffer (AKA arena).
pub struct Stage1 {
engine: spin::Mutex<Talc<ClaimOnOom>>,
buf_ptr: *const u8,
buf_end: *const u8,
}
impl Stage1 {
/// # Safety
/// The specified memory must be valid for reads and writes and it must be exclusively available
/// to [`Stage1`].
pub const unsafe fn new<const L: usize>(buf: *mut [u8; L]) -> Self {
let engine = Talc::new(ClaimOnOom::new(Span::from_array(buf)));
let buf_ptr = buf.cast();
Self {
engine: spin::Mutex::new(engine),
buf_ptr,
buf_end: buf_ptr.add(L),
}
}
pub fn is_owner(&self, ptr: *const u8) -> bool {
ptr >= self.buf_ptr && ptr < self.buf_end
}
/// The returned pointer will always within the buffer that was specified in the
/// [`Self::new()`].
///
/// # Safety
/// `layout` must be nonzero.
pub unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
self.engine
.lock()
.malloc(layout)
.map(|v| v.as_ptr())
.unwrap_or(null_mut())
}
/// # Safety
/// `ptr` must be obtained with [`Self::alloc()`] and `layout` must be the same one that was
/// passed to that method.
pub unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
self.engine.lock().free(NonNull::new_unchecked(ptr), layout);
}
}
unsafe impl Sync for Stage1 {}


@ -1,6 +1,6 @@
use crate::config::PAGE_SIZE;
use crate::context::{current_thread, CpuLocal};
use crate::uma::UmaZone;
use crate::uma::{Uma, UmaZone};
use alloc::string::ToString;
use alloc::sync::Arc;
use alloc::vec::Vec;
@ -24,7 +24,7 @@ impl Stage2 {
const KMEM_ZSIZE: usize = PAGE_SIZE.get() >> Self::KMEM_ZSHIFT;
/// See `kmeminit` on the PS4 for a reference.
pub fn new() -> Self {
pub fn new(uma: &mut Uma) -> Self {
// The maximum possible alignment that Layout allows is one bit below the most
// significant bit of isize (e.g. 0x4000000000000000 on a 64-bit system). So we can use
// "size_of::<usize>() * 8 - 1" to get the size of the array for all possible alignments.
@ -46,7 +46,7 @@ impl Stage2 {
}
// Create zone.
let zone = Arc::new(UmaZone::new(size.to_string().into(), size, align - 1));
let zone = Arc::new(uma.create_zone(size.to_string().into(), size, align - 1));
while last <= size.get() {
zones.push(zone.clone());
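A small hypothetical illustration of the sizing comment above (slot_for is made up; the constant mirrors the array length this code relies on):

    // Per the comment above, the largest alignment expected from Layout on a
    // 64-bit target is 1 << 62, so indexing by align.trailing_zeros() needs
    // size_of::<usize>() * 8 - 1 = 63 slots.
    const ALIGN_SLOTS: usize = core::mem::size_of::<usize>() * 8 - 1;

    fn slot_for(align: usize) -> usize {
        debug_assert!(align.is_power_of_two());
        align.trailing_zeros() as usize // at most 62, always < ALIGN_SLOTS
    }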


@ -7,10 +7,14 @@ use core::cell::{RefCell, RefMut};
pub struct PrivateCell<T>(RefCell<T>);
impl<T> PrivateCell<T> {
/// # Context safety
/// This function does not require a CPU context.
pub fn new(v: T) -> Self {
Self(RefCell::new(v))
}
/// See [borrow_mut] for a safe wrapper.
///
/// # Safety
/// `owner` must be an owner of this field.
///
@ -22,6 +26,8 @@ impl<T> PrivateCell<T> {
}
fn validate(&self, owner: &Thread) {
// This check will be optimized out most of the time because the implementation of
// current_thread() uses "pure" + "nomem" on its inline assembly.
let current = current_thread();
if !core::ptr::eq(BorrowedArc::as_ptr(&current), owner) {
@ -31,3 +37,12 @@ impl<T> PrivateCell<T> {
}
unsafe impl<T> Sync for PrivateCell<T> {}
/// Safe wrapper of [PrivateCell::borrow_mut()].
macro_rules! borrow_mut {
($t:ident, $f:ident) => {
unsafe { $t.$f.borrow_mut($t) }
};
}
pub(super) use borrow_mut;
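For reference, the macro expands to exactly the unsafe call it replaces in thread.rs below; for example, borrow_mut!(self, active_mutexes) expands to:

    unsafe { self.active_mutexes.borrow_mut(self) }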


@ -1,6 +1,6 @@
use self::cell::PrivateCell;
use self::cell::{borrow_mut, PrivateCell};
use super::Proc;
use crate::lock::{Gutex, GutexGroup, GutexWriteGuard};
use crate::lock::{Gutex, GutexGroup, GutexWrite};
use alloc::sync::Arc;
use core::cell::RefMut;
use core::sync::atomic::{AtomicU8, Ordering};
@ -49,6 +49,7 @@ impl Thread {
}
pub fn can_sleep(&self) -> bool {
// Both of the values here can only be modified by this thread, so there is no race condition here.
let active_pins = self.active_pins.load(Ordering::Relaxed);
let active_interrupts = self.active_interrupts.load(Ordering::Relaxed);
@ -79,15 +80,17 @@ impl Thread {
/// # Panics
/// If called from another thread.
pub fn active_mutexes_mut(&self) -> RefMut<u16> {
unsafe { self.active_mutexes.borrow_mut(self) }
borrow_mut!(self, active_mutexes)
}
/// Sleeping address. Zero if this thread is not in a sleep queue.
pub fn sleeping_mut(&self) -> GutexWriteGuard<usize> {
pub fn sleeping_mut(&self) -> GutexWrite<usize> {
self.sleeping.write()
}
/// # Panics
/// If called from another thread.
pub fn profiling_ticks_mut(&self) -> RefMut<u32> {
unsafe { self.profiling_ticks.borrow_mut(self) }
borrow_mut!(self, profiling_ticks)
}
}


@ -1,135 +1,28 @@
use self::bucket::UmaBucket;
use crate::context::{current_thread, CpuLocal};
use crate::lock::{Gutex, GutexGroup};
pub use self::zone::*;
use alloc::borrow::Cow;
use alloc::collections::VecDeque;
use core::cell::RefCell;
use core::num::NonZero;
use core::ops::DerefMut;
use core::ptr::null_mut;
mod bucket;
mod zone;
/// Implementation of `uma_zone` structure.
pub struct UmaZone {
size: NonZero<usize>, // uz_size
caches: CpuLocal<RefCell<UmaCache>>, // uz_cpu
full_buckets: Gutex<VecDeque<UmaBucket>>, // uz_full_bucket
free_buckets: Gutex<VecDeque<UmaBucket>>, // uz_free_bucket
alloc_count: Gutex<u64>, // uz_allocs
free_count: Gutex<u64>, // uz_frees
}
/// Implementation of UMA system.
pub struct Uma {}
impl Uma {
/// See `uma_startup` on the PS4 for a reference. Beware that our implementation cannot access
/// the CPU context because this function can be called before context activation.
///
/// # Context safety
/// This function does not require a CPU context.
pub fn new() -> Self {
Self {}
}
impl UmaZone {
/// See `uma_zcreate` on the PS4 for a reference.
///
/// # Context safety
/// This function does not require a CPU context on **stage 1** heap.
pub fn new(_: Cow<'static, str>, size: NonZero<usize>, _: usize) -> Self {
// Ths PS4 allocate a new uma_zone from masterzone_z but we don't have that. This method
// basically an implementation of zone_ctor.
let gg = GutexGroup::new();
Self {
size,
caches: CpuLocal::new(|_| RefCell::default()),
full_buckets: gg.clone().spawn(VecDeque::new()),
free_buckets: gg.clone().spawn(VecDeque::new()),
alloc_count: gg.clone().spawn(0),
free_count: gg.spawn(0),
}
}
pub fn size(&self) -> NonZero<usize> {
self.size
}
/// See `uma_zalloc_arg` on the PS4 for a reference.
pub fn alloc(&self) -> *mut u8 {
// Our implementation imply M_WAITOK and M_ZERO.
let td = current_thread();
if !td.can_sleep() {
panic!("heap allocation in a non-sleeping context is not supported");
}
// Try allocate from per-CPU cache first so we don't need to acquire a mutex lock.
let caches = self.caches.lock();
let mem = Self::alloc_from_cache(caches.borrow_mut().deref_mut());
if !mem.is_null() {
return mem;
}
drop(caches); // Exit from non-sleeping context before acquire the mutex.
// Cache not found, allocate from the zone. We need to re-check the cache again because we
// may on a different CPU since we drop the CPU pinning on the above.
let mut frees = self.free_buckets.write();
let caches = self.caches.lock();
let mut cache = caches.borrow_mut();
let mem = Self::alloc_from_cache(&mut cache);
if !mem.is_null() {
return mem;
}
// TODO: What actually we are doing here?
*self.alloc_count.write() += core::mem::take(&mut cache.allocs);
*self.free_count.write() += core::mem::take(&mut cache.frees);
if let Some(b) = cache.alloc.take() {
frees.push_front(b);
}
if let Some(b) = self.full_buckets.write().pop_front() {
cache.alloc = Some(b);
// Seems like this should never fail.
let m = Self::alloc_from_cache(&mut cache);
assert!(!m.is_null());
return m;
}
drop(cache);
drop(caches);
// TODO: Why the PS4 check if this zone is zone_pack, zone_jumbop, zone_mbuf or zone_clust?
self.alloc_bucket();
todo!()
}
fn alloc_from_cache(c: &mut UmaCache) -> *mut u8 {
while let Some(b) = &mut c.alloc {
if b.len() != 0 {
todo!()
}
if c.free.as_ref().is_some_and(|b| b.len() != 0) {
core::mem::swap(&mut c.alloc, &mut c.free);
continue;
}
break;
}
null_mut()
}
/// See `zone_alloc_bucket` on the PS4 for a reference.
fn alloc_bucket(&self) -> bool {
pub fn create_zone(&mut self, _: Cow<'static, str>, _: NonZero<usize>, _: usize) -> UmaZone {
todo!()
}
}
/// Implementation of `uma_cache` structure.
#[derive(Default)]
struct UmaCache {
alloc: Option<UmaBucket>, // uc_allocbucket
free: Option<UmaBucket>, // uc_freebucket
allocs: u64, // uc_allocs
frees: u64, // uc_frees
}

kernel/src/uma/zone.rs Normal file

@ -0,0 +1,113 @@
use super::bucket::UmaBucket;
use crate::context::{current_thread, CpuLocal};
use crate::lock::Gutex;
use alloc::collections::VecDeque;
use core::cell::RefCell;
use core::num::NonZero;
use core::ops::DerefMut;
use core::ptr::null_mut;
/// Implementation of `uma_zone` structure.
pub struct UmaZone {
size: NonZero<usize>, // uz_size
caches: CpuLocal<RefCell<UmaCache>>, // uz_cpu
full_buckets: Gutex<VecDeque<UmaBucket>>, // uz_full_bucket
free_buckets: Gutex<VecDeque<UmaBucket>>, // uz_free_bucket
alloc_count: Gutex<u64>, // uz_allocs
free_count: Gutex<u64>, // uz_frees
}
impl UmaZone {
pub fn size(&self) -> NonZero<usize> {
self.size
}
/// See `uma_zalloc_arg` on the PS4 for a reference.
pub fn alloc(&self) -> *mut u8 {
// Our implementation implies M_WAITOK and M_ZERO.
let td = current_thread();
if !td.can_sleep() {
panic!("heap allocation in a non-sleeping context is not supported");
}
// Try to allocate from the per-CPU cache first so we don't need to acquire a mutex lock.
let caches = self.caches.lock();
let mem = Self::alloc_from_cache(caches.borrow_mut().deref_mut());
if !mem.is_null() {
return mem;
}
drop(caches); // Exit the non-sleeping context before acquiring the mutex.
// Cache not found, allocate from the zone. We need to re-check the cache because we
// may be on a different CPU since we dropped the CPU pinning above.
let mut frees = self.free_buckets.write();
let caches = self.caches.lock();
let mut cache = caches.borrow_mut();
let mem = Self::alloc_from_cache(&mut cache);
if !mem.is_null() {
return mem;
}
// TODO: What are we actually doing here?
*self.alloc_count.write() += core::mem::take(&mut cache.allocs);
*self.free_count.write() += core::mem::take(&mut cache.frees);
if let Some(b) = cache.alloc.take() {
frees.push_front(b);
}
if let Some(b) = self.full_buckets.write().pop_front() {
cache.alloc = Some(b);
// Seems like this should never fail.
let m = Self::alloc_from_cache(&mut cache);
assert!(!m.is_null());
return m;
}
drop(cache);
drop(caches);
// TODO: Why does the PS4 check if this zone is zone_pack, zone_jumbop, zone_mbuf or zone_clust?
self.alloc_bucket();
todo!()
}
fn alloc_from_cache(c: &mut UmaCache) -> *mut u8 {
while let Some(b) = &mut c.alloc {
if b.len() != 0 {
todo!()
}
if c.free.as_ref().is_some_and(|b| b.len() != 0) {
core::mem::swap(&mut c.alloc, &mut c.free);
continue;
}
break;
}
null_mut()
}
/// See `zone_alloc_bucket` on the PS4 for a reference.
fn alloc_bucket(&self) -> bool {
todo!()
}
}
/// Implementation of `uma_cache` structure.
#[derive(Default)]
struct UmaCache {
alloc: Option<UmaBucket>, // uc_allocbucket
free: Option<UmaBucket>, // uc_freebucket
allocs: u64, // uc_allocs
frees: u64, // uc_frees
}