From 2372243b41abaa437dbb51085083f79304341014 Mon Sep 17 00:00:00 2001
From: Putta Khunchalee
Date: Wed, 16 Oct 2024 02:18:22 +0700
Subject: [PATCH] Removes hard-coded RAM size (#1038)

---
 gui/kvm.cpp                     | 23 -------------------
 gui/src/vmm/debug/controller.rs | 40 ++++++++++++++++++++++++++-------
 gui/src/vmm/hv/linux/ffi.rs     | 19 +++++++++-------
 gui/src/vmm/hv/linux/mod.rs     | 18 +++++++++------
 gui/src/vmm/hv/macos/mod.rs     |  2 +-
 gui/src/vmm/hv/windows/mod.rs   |  8 +++++--
 gui/src/vmm/mod.rs              |  8 ++++---
 gui/src/vmm/ram/mod.rs          | 31 ++++++++++++++++---------
 8 files changed, 86 insertions(+), 63 deletions(-)

diff --git a/gui/kvm.cpp b/gui/kvm.cpp
index 2e3eb62b..68749c90 100644
--- a/gui/kvm.cpp
+++ b/gui/kvm.cpp
@@ -9,29 +9,6 @@
 #include
 #include
 
-extern "C" int kvm_set_user_memory_region(
-    int vm,
-    uint32_t slot,
-    uint64_t addr,
-    uint64_t len,
-    void *mem)
-{
-    kvm_userspace_memory_region mr;
-
-    memset(&mr, 0, sizeof(mr));
-
-    mr.slot = slot;
-    mr.guest_phys_addr = addr;
-    mr.memory_size = len;
-    mr.userspace_addr = reinterpret_cast<uint64_t>(mem);
-
-    if (ioctl(vm, KVM_SET_USER_MEMORY_REGION, &mr) < 0) {
-        return errno;
-    }
-
-    return 0;
-}
-
 extern "C" int kvm_run(int vcpu)
 {
     return ioctl(vcpu, KVM_RUN, 0);
diff --git a/gui/src/vmm/debug/controller.rs b/gui/src/vmm/debug/controller.rs
index 7715d48c..fee3c358 100644
--- a/gui/src/vmm/debug/controller.rs
+++ b/gui/src/vmm/debug/controller.rs
@@ -92,7 +92,11 @@ impl Debugger {
         if matches!(s.deref(), DataState::None) {
             *s = DataState::DebuggerOwned(v);
-            return ResponseHandle(&self.0);
+
+            return ResponseHandle {
+                data: &self.0,
+                taken: false,
+            };
         }
 
         // Once the debugger has been requested the data it will wait for the data and a signal from
@@ -121,27 +125,47 @@
         // wake them up. Condvar::notify_one do nothing if there are no any thread waiting on it.
         self.0.signal.notify_one();
 
-        ResponseHandle(&self.0)
+        ResponseHandle {
+            data: &self.0,
+            taken: false,
+        }
     }
 }
 
 /// Provides method to get a response from the debugger.
-pub struct ResponseHandle<'a, T>(&'a Data<T>);
+pub struct ResponseHandle<'a, T> {
+    data: &'a Data<T>,
+    taken: bool,
+}
 
 impl<'a, T> ResponseHandle<'a, T> {
-    pub fn into_response(self) -> T {
-        let mut s = self.0.state.lock().unwrap();
+    pub fn into_response(mut self) -> T {
+        let mut s = self.data.state.lock().unwrap();
 
         let v = match std::mem::take(s.deref_mut()) {
             DataState::DebuggeeOwned(v) => v,
             _ => panic!("the debugger did not release the data"),
         };
 
+        self.taken = true;
+
+        v
+    }
+}
+
+impl<'a, T> Drop for ResponseHandle<'a, T> {
+    fn drop(&mut self) {
+        if !self.taken {
+            let mut s = self.data.state.lock().unwrap();
+
+            if !matches!(std::mem::take(s.deref_mut()), DataState::DebuggeeOwned(_)) {
+                panic!("the debugger did not release the data");
+            }
+        }
+
         // It is possible for this method to get called after the debugger has reacquired the lock
         // so we need to wake them up. Condvar::notify_one do nothing if there are no any thread
         // waiting on it.
-        self.0.signal.notify_one();
-
-        v
+        self.data.signal.notify_one();
     }
 }
diff --git a/gui/src/vmm/hv/linux/ffi.rs b/gui/src/vmm/hv/linux/ffi.rs
index 9d054775..f3f8350e 100644
--- a/gui/src/vmm/hv/linux/ffi.rs
+++ b/gui/src/vmm/hv/linux/ffi.rs
@@ -1,10 +1,11 @@
-use std::ffi::{c_int, c_ulong, c_void};
+use std::ffi::{c_int, c_ulong};
 
 pub const KVM_GET_API_VERSION: c_ulong = _IO(KVMIO, 0x00);
 pub const KVM_CREATE_VM: c_ulong = _IO(KVMIO, 0x01);
 pub const KVM_CHECK_EXTENSION: c_ulong = _IO(KVMIO, 0x03);
 pub const KVM_GET_VCPU_MMAP_SIZE: c_ulong = _IO(KVMIO, 0x04);
 pub const KVM_CREATE_VCPU: c_ulong = _IO(KVMIO, 0x41);
+pub const KVM_SET_USER_MEMORY_REGION: c_ulong = _IOW::<KvmUserspaceMemoryRegion>(KVMIO, 0x46);
 #[cfg(target_arch = "aarch64")]
 pub const KVM_GET_ONE_REG: c_ulong = _IOW::>(KVMIO, 0xab);
 #[cfg(target_arch = "aarch64")]
@@ -78,6 +79,15 @@ const fn _IOC(dir: c_ulong, ty: c_ulong, nr: c_ulong, size: c_ulong) -> c_ulong
         | (size << _IOC_SIZESHIFT)
 }
 
+#[repr(C)]
+pub struct KvmUserspaceMemoryRegion {
+    pub slot: u32,
+    pub flags: u32,
+    pub guest_phys_addr: u64,
+    pub memory_size: u64,
+    pub userspace_addr: u64,
+}
+
 #[cfg(target_arch = "aarch64")]
 #[repr(C)]
 pub struct KvmOneReg<'a, T> {
@@ -93,12 +103,5 @@ pub struct KvmVcpuInit {
 }
 
 extern "C" {
-    pub fn kvm_set_user_memory_region(
-        vm: c_int,
-        slot: u32,
-        addr: u64,
-        len: u64,
-        mem: *mut c_void,
-    ) -> c_int;
     pub fn kvm_run(vcpu: c_int) -> c_int;
 }
diff --git a/gui/src/vmm/hv/linux/mod.rs b/gui/src/vmm/hv/linux/mod.rs
index 3216aaba..b211c481 100644
--- a/gui/src/vmm/hv/linux/mod.rs
+++ b/gui/src/vmm/hv/linux/mod.rs
@@ -1,8 +1,9 @@
 // SPDX-License-Identifier: MIT OR Apache-2.0
 use self::cpu::KvmCpu;
 use self::ffi::{
-    kvm_set_user_memory_region, KVM_API_VERSION, KVM_CAP_MAX_VCPUS, KVM_CHECK_EXTENSION,
+    KvmUserspaceMemoryRegion, KVM_API_VERSION, KVM_CAP_MAX_VCPUS, KVM_CHECK_EXTENSION,
     KVM_CREATE_VCPU, KVM_CREATE_VM, KVM_GET_API_VERSION, KVM_GET_VCPU_MMAP_SIZE,
+    KVM_SET_USER_MEMORY_REGION,
 };
 use super::{CpuFeats, Hypervisor};
 use crate::vmm::ram::Ram;
@@ -81,13 +82,16 @@ pub fn new(cpu: usize, ram: Ram) -> Result {
     };
 
     // Set RAM.
-    let slot = 0;
-    let len = ram.len().try_into().unwrap();
-    let mem = ram.host_addr().cast_mut().cast();
+    let mr = KvmUserspaceMemoryRegion {
+        slot: 0,
+        flags: 0,
+        guest_phys_addr: 0,
+        memory_size: ram.len().get().try_into().unwrap(),
+        userspace_addr: (ram.host_addr() as usize).try_into().unwrap(),
+    };
 
-    match unsafe { kvm_set_user_memory_region(vm.as_raw_fd(), slot, 0, len, mem) } {
-        0 => {}
-        v => return Err(VmmError::MapRamFailed(Error::from_raw_os_error(v))),
+    if unsafe { ioctl(vm.as_raw_fd(), KVM_SET_USER_MEMORY_REGION, &mr) } < 0 {
+        return Err(VmmError::MapRamFailed(Error::last_os_error()));
     }
 
     // AArch64 require all CPU to be created before calling KVM_ARM_VCPU_INIT.
diff --git a/gui/src/vmm/hv/macos/mod.rs b/gui/src/vmm/hv/macos/mod.rs
index af92c886..828efa33 100644
--- a/gui/src/vmm/hv/macos/mod.rs
+++ b/gui/src/vmm/hv/macos/mod.rs
@@ -46,7 +46,7 @@ pub fn new(_: usize, ram: Ram) -> Result {
 
     // Set RAM.
     let host = hv.ram.host_addr().cast_mut().cast();
-    let len = hv.ram.len().try_into().unwrap();
+    let len = hv.ram.len().get().try_into().unwrap();
     let ret = unsafe {
         hv_vm_map(
             host,
diff --git a/gui/src/vmm/hv/windows/mod.rs b/gui/src/vmm/hv/windows/mod.rs
index 232bb1e2..06d19a61 100644
--- a/gui/src/vmm/hv/windows/mod.rs
+++ b/gui/src/vmm/hv/windows/mod.rs
@@ -19,8 +19,12 @@ pub fn new(cpu: usize, ram: Ram) -> Result {
     part.setup().map_err(VmmError::SetupPartitionFailed)?;
 
     // Map memory.
-    part.map_gpa(ram.host_addr().cast(), 0, ram.len().try_into().unwrap())
-        .map_err(VmmError::MapRamFailed)?;
+    part.map_gpa(
+        ram.host_addr().cast(),
+        0,
+        ram.len().get().try_into().unwrap(),
+    )
+    .map_err(VmmError::MapRamFailed)?;
 
     Ok(Whp {
         part,
diff --git a/gui/src/vmm/mod.rs b/gui/src/vmm/mod.rs
index 8f34aac9..c95b85de 100644
--- a/gui/src/vmm/mod.rs
+++ b/gui/src/vmm/mod.rs
@@ -329,7 +329,7 @@ pub unsafe extern "C" fn vmm_start(
     };
 
     // Setup RAM.
-    let ram = match Ram::new(block_size) {
+    let ram = match Ram::new(NonZero::new(1024 * 1024 * 1024 * 8).unwrap(), block_size) {
         Ok(v) => v,
         Err(e) => {
             *err = RustError::with_source("couldn't create a RAM", e).into_c();
             return null_mut();
         }
     };
 
+    // Setup virtual devices.
+    let event = VmmEventHandler { fp: event, cx };
+    let devices = Arc::new(setup_devices(ram.len().get(), block_size, event));
+
     // Setup hypervisor.
     let mut hv = match self::hv::new(8, ram) {
         Ok(v) => v,
@@ -401,8 +405,6 @@ pub unsafe extern "C" fn vmm_start(
     }
 
     // Allocate arguments.
-    let event = VmmEventHandler { fp: event, cx };
-    let devices = Arc::new(setup_devices(Ram::SIZE, block_size, event));
     let env = BootEnv::Vm(Vm {
         vmm: devices.vmm().addr(),
         console: devices.console().addr(),
diff --git a/gui/src/vmm/ram/mod.rs b/gui/src/vmm/ram/mod.rs
index ec9db8c9..b706005a 100644
--- a/gui/src/vmm/ram/mod.rs
+++ b/gui/src/vmm/ram/mod.rs
@@ -16,15 +16,14 @@ mod builder;
 /// RAM always started at address 0.
 pub struct Ram {
     mem: *mut u8,
+    len: NonZero<usize>,
     block_size: NonZero<usize>,
 }
 
 impl Ram {
-    pub(crate) const SIZE: usize = 1024 * 1024 * 1024 * 8; // 8GB
-
     /// # Safety
     /// `block_size` must be greater or equal host page size.
-    pub unsafe fn new(block_size: NonZero<usize>) -> Result {
+    pub unsafe fn new(len: NonZero<usize>, block_size: NonZero<usize>) -> Result {
         use std::io::Error;
 
         // Reserve memory range.
@@ -35,7 +34,7 @@
         let mem = mmap(
             null_mut(),
-            Self::SIZE,
+            len.get(),
             PROT_NONE,
             MAP_PRIVATE | MAP_ANON,
             -1,
@@ -54,7 +53,7 @@
         use std::ptr::null;
         use windows_sys::Win32::System::Memory::{VirtualAlloc, MEM_RESERVE, PAGE_NOACCESS};
 
-        let mem = VirtualAlloc(null(), Self::SIZE, MEM_RESERVE, PAGE_NOACCESS);
+        let mem = VirtualAlloc(null(), len.get(), MEM_RESERVE, PAGE_NOACCESS);
 
         if mem.is_null() {
             return Err(Error::last_os_error());
@@ -63,15 +62,19 @@
             mem.cast()
         };
 
-        Ok(Self { mem, block_size })
+        Ok(Self {
+            mem,
+            len,
+            block_size,
+        })
     }
 
     pub fn host_addr(&self) -> *const u8 {
         self.mem
     }
 
-    pub fn len(&self) -> usize {
-        Self::SIZE
+    pub fn len(&self) -> NonZero<usize> {
+        self.len
     }
 
     pub fn builder(&mut self) -> RamBuilder {
@@ -88,7 +91,10 @@
         assert_eq!(addr % self.block_size, 0);
         assert_eq!(len.get() % self.block_size, 0);
 
-        if !addr.checked_add(len.get()).is_some_and(|v| v <= Self::SIZE) {
+        if !addr
+            .checked_add(len.get())
+            .is_some_and(|v| v <= self.len.get())
+        {
             return Err(RamError::InvalidAddr);
         }
 
@@ -106,7 +112,10 @@
         assert_eq!(addr % self.block_size, 0);
         assert_eq!(len.get() % self.block_size, 0);
 
-        if !addr.checked_add(len.get()).is_some_and(|v| v <= Self::SIZE) {
+        if !addr
+            .checked_add(len.get())
+            .is_some_and(|v| v <= self.len.get())
+        {
             return Err(RamError::InvalidAddr);
         }
 
@@ -174,7 +183,7 @@ impl Drop for Ram {
     fn drop(&mut self) {
         use libc::munmap;
 
-        if unsafe { munmap(self.mem.cast(), Self::SIZE) } < 0 {
+        if unsafe { munmap(self.mem.cast(), self.len.get()) } < 0 {
             panic!(
                 "failed to unmap RAM at {:p}: {}",
                 self.mem,