Makes RAM belong to hypervisor (#976)

Putta Khunchalee, 2024-09-10 01:45:48 +07:00 (committed by GitHub)
parent 1e68099365
commit 376f8ce99e
13 changed files with 268 additions and 292 deletions

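In short: guest RAM now belongs to the hypervisor outright instead of being shared with the VMM through `Arc<Ram>`, and everything that needs memory reaches it through two new accessors on the `Hypervisor` trait. A condensed sketch of the resulting shape, abbreviated from the diffs below (CPU-related trait items elided):

    // Abbreviated from the diffs below.
    pub trait Hypervisor: Send + Sync {
        fn ram(&self) -> &Ram;             // shared access, e.g. for device MMIO contexts
        fn ram_mut(&mut self) -> &mut Ram; // exclusive access, e.g. for RamBuilder
    }

    // Each backend (Kvm, Hf, Whp) now stores the Ram by value:
    struct Kvm {
        vcpu_mmap_size: usize,
        vm: OwnedFd,
        ram: Ram, // was: ram: Arc<Ram>
        kvm: OwnedFd,
    }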

@@ -170,42 +170,6 @@ struct Vmm *vmm_run(const char *kernel,
 struct RustError *vmm_draw(struct Vmm *vmm);
 
-#if defined(__linux__)
-extern int kvm_check_version(int kvm, bool *compat);
-#endif
-
-#if defined(__linux__)
-extern int kvm_max_vcpus(int kvm, size_t *max);
-#endif
-
-#if defined(__linux__)
-extern int kvm_create_vm(int kvm, int *fd);
-#endif
-
-#if defined(__linux__)
-extern int kvm_get_vcpu_mmap_size(int kvm);
-#endif
-
-#if defined(__linux__)
-extern int kvm_set_user_memory_region(int vm,
-                                      uint32_t slot,
-                                      uint64_t addr,
-                                      uint64_t len,
-                                      void *mem);
-#endif
-
-#if defined(__linux__)
-extern int kvm_create_vcpu(int vm, uint32_t id, int *fd);
-#endif
-
-#if defined(__linux__)
-extern int kvm_run(int vcpu);
-#endif
-
-#if defined(__linux__)
-extern int kvm_translate(int vcpu, kvm_translation *arg);
-#endif
-
 #ifdef __cplusplus
 } // extern "C"
 #endif // __cplusplus


@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MIT OR Apache-2.0
 use super::hv::{Cpu, CpuFeats, CpuStates, Pstate};
-use super::hw::RamMap;
+use super::ram::RamMap;
 use super::MainCpuError;
 
 pub fn setup_main_cpu(


@@ -5,7 +5,7 @@ use self::ffi::{
     kvm_set_user_memory_region,
 };
 use super::{CpuFeats, Hypervisor};
-use crate::vmm::hw::Ram;
+use crate::vmm::ram::Ram;
 use crate::vmm::VmmError;
 use libc::{mmap, open, MAP_FAILED, MAP_PRIVATE, O_RDWR, PROT_READ, PROT_WRITE};
 use std::os::fd::{AsRawFd, FromRawFd, OwnedFd};
@@ -20,89 +20,86 @@ mod ffi;
 mod regs;
 mod run;
 
+pub fn new(cpu: usize, ram: Ram) -> Result<impl Hypervisor, VmmError> {
+    use std::io::Error;
+
+    // Open KVM device.
+    let kvm = unsafe { open(b"/dev/kvm\0".as_ptr().cast(), O_RDWR) };
+
+    if kvm < 0 {
+        return Err(VmmError::OpenKvmFailed(Error::last_os_error()));
+    }
+
+    // Check KVM version.
+    let kvm = unsafe { OwnedFd::from_raw_fd(kvm) };
+    let mut compat = false;
+
+    match unsafe { kvm_check_version(kvm.as_raw_fd(), &mut compat) } {
+        0 if !compat => {
+            return Err(VmmError::KvmVersionMismatched);
+        }
+        0 => {}
+        v => return Err(VmmError::GetKvmVersionFailed(Error::from_raw_os_error(v))),
+    }
+
+    // Check max CPU.
+    let mut max = 0;
+
+    match unsafe { kvm_max_vcpus(kvm.as_raw_fd(), &mut max) } {
+        0 => {}
+        v => {
+            return Err(VmmError::GetMaxCpuFailed(Error::from_raw_os_error(v)));
+        }
+    }
+
+    if max < cpu {
+        return Err(VmmError::MaxCpuTooLow);
+    }
+
+    // Get size of CPU context.
+    let vcpu_mmap_size = match unsafe { kvm_get_vcpu_mmap_size(kvm.as_raw_fd()) } {
+        size @ 0.. => size as usize,
+        _ => return Err(VmmError::GetMmapSizeFailed(Error::last_os_error())),
+    };
+
+    // Create a VM.
+    let mut vm = -1;
+
+    match unsafe { kvm_create_vm(kvm.as_raw_fd(), &mut vm) } {
+        0 => {}
+        v => return Err(VmmError::CreateVmFailed(Error::from_raw_os_error(v))),
+    }
+
+    // Set RAM.
+    let vm = unsafe { OwnedFd::from_raw_fd(vm) };
+    let slot = 0;
+    let len = ram.len().try_into().unwrap();
+    let mem = ram.host_addr().cast_mut().cast();
+
+    match unsafe { kvm_set_user_memory_region(vm.as_raw_fd(), slot, 0, len, mem) } {
+        0 => {}
+        v => return Err(VmmError::MapRamFailed(Error::from_raw_os_error(v))),
+    }
+
+    Ok(Kvm {
+        vcpu_mmap_size,
+        vm,
+        ram,
+        kvm,
+    })
+}
+
 /// Implementation of [`Hypervisor`] using KVM.
 ///
 /// Fields in this struct need to drop in a correct order (e.g. vm must be dropped before ram).
-pub struct Kvm {
+struct Kvm {
     vcpu_mmap_size: usize,
     vm: OwnedFd,
-    #[allow(dead_code)] // ram are needed by vm.
-    ram: Arc<Ram>,
+    ram: Ram,
     #[allow(dead_code)] // kvm are needed by vm.
     kvm: OwnedFd,
 }
 
-impl Kvm {
-    pub fn new(cpu: usize, ram: Arc<Ram>) -> Result<Self, VmmError> {
-        use std::io::Error;
-
-        // Open KVM device.
-        let kvm = unsafe { open(b"/dev/kvm\0".as_ptr().cast(), O_RDWR) };
-
-        if kvm < 0 {
-            return Err(VmmError::OpenKvmFailed(Error::last_os_error()));
-        }
-
-        // Check KVM version.
-        let kvm = unsafe { OwnedFd::from_raw_fd(kvm) };
-        let mut compat = false;
-
-        match unsafe { kvm_check_version(kvm.as_raw_fd(), &mut compat) } {
-            0 if !compat => {
-                return Err(VmmError::KvmVersionMismatched);
-            }
-            0 => {}
-            v => return Err(VmmError::GetKvmVersionFailed(Error::from_raw_os_error(v))),
-        }
-
-        // Check max CPU.
-        let mut max = 0;
-
-        match unsafe { kvm_max_vcpus(kvm.as_raw_fd(), &mut max) } {
-            0 => {}
-            v => {
-                return Err(VmmError::GetMaxCpuFailed(Error::from_raw_os_error(v)));
-            }
-        }
-
-        if max < cpu {
-            return Err(VmmError::MaxCpuTooLow);
-        }
-
-        // Get size of CPU context.
-        let vcpu_mmap_size = match unsafe { kvm_get_vcpu_mmap_size(kvm.as_raw_fd()) } {
-            size @ 0.. => size as usize,
-            _ => return Err(VmmError::GetMmapSizeFailed(Error::last_os_error())),
-        };
-
-        // Create a VM.
-        let mut vm = -1;
-
-        match unsafe { kvm_create_vm(kvm.as_raw_fd(), &mut vm) } {
-            0 => {}
-            v => return Err(VmmError::CreateVmFailed(Error::from_raw_os_error(v))),
-        }
-
-        // Set RAM.
-        let vm = unsafe { OwnedFd::from_raw_fd(vm) };
-        let slot = 0;
-        let len = ram.len().try_into().unwrap();
-        let mem = ram.host_addr().cast_mut().cast();
-
-        match unsafe { kvm_set_user_memory_region(vm.as_raw_fd(), slot, 0, len, mem) } {
-            0 => {}
-            v => return Err(VmmError::MapRamFailed(Error::from_raw_os_error(v))),
-        }
-
-        Ok(Self {
-            vcpu_mmap_size,
-            vm,
-            ram,
-            kvm,
-        })
-    }
-}
 
 impl Hypervisor for Kvm {
     type Cpu<'a> = KvmCpu<'a>;
     type CpuErr = KvmCpuError;

@@ -111,6 +108,14 @@ impl Hypervisor for Kvm {
         Ok(CpuFeats {})
     }
 
+    fn ram(&self) -> &Ram {
+        &self.ram
+    }
+
+    fn ram_mut(&mut self) -> &mut Ram {
+        &mut self.ram
+    }
+
     fn create_cpu(&self, id: usize) -> Result<Self::Cpu<'_>, Self::CpuErr> {
         use std::io::Error;

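The drop-order comment on `Kvm` is load-bearing: Rust drops struct fields in declaration order, so `vm` is closed before `ram` is unmapped, and the `kvm` device handle goes last. A small self-contained demonstration of that rule:

    // Demonstrates the field drop order the Kvm struct relies on.
    struct Loud(&'static str);

    impl Drop for Loud {
        fn drop(&mut self) {
            println!("dropping {}", self.0);
        }
    }

    struct Fields {
        vm: Loud,
        ram: Loud,
        kvm: Loud,
    }

    fn main() {
        let _f = Fields {
            vm: Loud("vm"),
            ram: Loud("ram"),
            kvm: Loud("kvm"),
        };
        // Prints "dropping vm", then "dropping ram", then "dropping kvm".
    }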

@@ -2,12 +2,11 @@
 use self::cpu::HfCpu;
 use self::vm::Vm;
 use super::{CpuFeats, Hypervisor};
-use crate::vmm::hw::Ram;
+use crate::vmm::ram::Ram;
 use crate::vmm::VmmError;
 use hv_sys::hv_vcpu_create;
 use std::ffi::c_int;
 use std::num::NonZero;
-use std::sync::Arc;
 use thiserror::Error;
@@ -16,25 +15,23 @@ mod arch;
 mod cpu;
 mod vm;
 
+pub fn new(_: usize, ram: Ram) -> Result<impl Hypervisor, VmmError> {
+    // Create a VM.
+    let vm = Vm::new().map_err(VmmError::CreateVmFailed)?;
+
+    // Map memory.
+    vm.vm_map(ram.host_addr().cast_mut().cast(), 0, ram.len())
+        .map_err(VmmError::MapRamFailed)?;
+
+    Ok(Hf { vm, ram })
+}
+
 /// Implementation of [`Hypervisor`] using Hypervisor Framework.
 ///
 /// Fields in this struct need to drop in a correct order.
-pub struct Hf {
+struct Hf {
     vm: Vm,
-    ram: Arc<Ram>,
+    ram: Ram,
 }
 
-impl Hf {
-    pub fn new(_: usize, ram: Arc<Ram>) -> Result<Self, VmmError> {
-        // Create a VM.
-        let vm = Vm::new().map_err(VmmError::CreateVmFailed)?;
-
-        // Map memory.
-        vm.vm_map(ram.host_addr().cast_mut().cast(), 0, ram.len())
-            .map_err(VmmError::MapRamFailed)?;
-
-        Ok(Self { vm, ram })
-    }
-}
 
 impl Hypervisor for Hf {

@@ -120,6 +117,14 @@ impl Hypervisor for Hf {
         Ok(CpuFeats {})
     }
 
+    fn ram(&self) -> &Ram {
+        &self.ram
+    }
+
+    fn ram_mut(&mut self) -> &mut Ram {
+        &mut self.ram
+    }
+
     fn create_cpu(&self, _: usize) -> Result<Self::Cpu<'_>, Self::CpuErr> {
         let mut instance = 0;


@@ -1,35 +1,17 @@
 // SPDX-License-Identifier: MIT OR Apache-2.0
-use super::hw::Ram;
-use super::VmmError;
+use super::ram::Ram;
 use std::error::Error;
-use std::sync::Arc;
 
 pub use self::arch::*;
+pub use self::os::new;
 
 #[cfg_attr(target_arch = "aarch64", path = "aarch64.rs")]
 #[cfg_attr(target_arch = "x86_64", path = "x86_64.rs")]
 mod arch;
 
-#[cfg(target_os = "linux")]
-mod linux;
-#[cfg(target_os = "macos")]
-mod macos;
-#[cfg(target_os = "windows")]
-mod windows;
-
-#[cfg(target_os = "linux")]
-pub fn new(cpu: usize, ram: Arc<Ram>) -> Result<impl Hypervisor, VmmError> {
-    self::linux::Kvm::new(cpu, ram)
-}
-
-#[cfg(target_os = "windows")]
-pub fn new(cpu: usize, ram: Arc<Ram>) -> Result<impl Hypervisor, VmmError> {
-    self::windows::Whp::new(cpu, ram)
-}
-
-#[cfg(target_os = "macos")]
-pub fn new(cpu: usize, ram: Arc<Ram>) -> Result<impl Hypervisor, VmmError> {
-    self::macos::Hf::new(cpu, ram)
-}
+#[cfg_attr(target_os = "linux", path = "linux/mod.rs")]
+#[cfg_attr(target_os = "macos", path = "macos/mod.rs")]
+#[cfg_attr(target_os = "windows", path = "windows/mod.rs")]
+mod os;
 
 /// Underlying hypervisor (e.g. KVM on Linux).
 pub trait Hypervisor: Send + Sync {

@@ -39,6 +21,8 @@ pub trait Hypervisor: Send + Sync {
     type CpuErr: Error + Send + 'static;
 
     fn cpu_features(&mut self) -> Result<CpuFeats, Self::CpuErr>;
+    fn ram(&self) -> &Ram;
+    fn ram_mut(&mut self) -> &mut Ram;
 
     /// This method must be called by a thread that is going to drive the returned CPU.
     fn create_cpu(&self, id: usize) -> Result<Self::Cpu<'_>, Self::CpuErr>;

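With RAM reachable through the trait, code that is generic over `Hypervisor` no longer needs to carry a separate `Ram` handle. A hypothetical helper (not part of this commit) shows the pattern the console device uses further below:

    // Hypothetical helper, not in the codebase; mirrors the console context below.
    fn read_guest<H: Hypervisor>(hv: &H, paddr: usize, len: usize) -> &[u8] {
        // Safety: the caller must pass a committed, in-bounds physical range.
        unsafe { std::slice::from_raw_parts(hv.ram().host_addr().add(paddr), len) }
    }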

@@ -1,7 +1,7 @@
 use self::cpu::WhpCpu;
 use self::partition::Partition;
 use super::{CpuFeats, Hypervisor};
-use crate::vmm::hw::Ram;
+use crate::vmm::ram::Ram;
 use crate::vmm::VmmError;
 use std::sync::Arc;
 use thiserror::Error;
@@ -10,29 +10,27 @@ use windows_sys::core::HRESULT;
 mod cpu;
 mod partition;
 
+pub fn new(cpu: usize, ram: Ram) -> Result<impl Hypervisor, VmmError> {
+    // Setup a partition.
+    let mut part = Partition::new().map_err(VmmError::CreatePartitionFailed)?;
+
+    part.set_processor_count(cpu)
+        .map_err(VmmError::SetCpuCountFailed)?;
+    part.setup().map_err(VmmError::SetupPartitionFailed)?;
+
+    // Map memory.
+    part.map_gpa(ram.host_addr().cast(), 0, ram.len().try_into().unwrap())
+        .map_err(VmmError::MapRamFailed)?;
+
+    Ok(Whp { part, ram })
+}
+
 /// Implementation of [`Hypervisor`] using Windows Hypervisor Platform.
 ///
 /// Fields in this struct need to drop in a correct order.
-pub struct Whp {
+struct Whp {
     part: Partition,
-    ram: Arc<Ram>,
+    ram: Ram,
 }
 
-impl Whp {
-    pub fn new(cpu: usize, ram: Arc<Ram>) -> Result<Self, VmmError> {
-        // Setup a partition.
-        let mut part = Partition::new().map_err(VmmError::CreatePartitionFailed)?;
-
-        part.set_processor_count(cpu)
-            .map_err(VmmError::SetCpuCountFailed)?;
-        part.setup().map_err(VmmError::SetupPartitionFailed)?;
-
-        // Map memory.
-        part.map_gpa(ram.host_addr().cast(), 0, ram.len().try_into().unwrap())
-            .map_err(VmmError::MapRamFailed)?;
-
-        Ok(Self { part, ram })
-    }
-}
 
 impl Hypervisor for Whp {

@@ -43,6 +41,14 @@ impl Hypervisor for Whp {
         Ok(CpuFeats {})
     }
 
+    fn ram(&self) -> &Ram {
+        &self.ram
+    }
+
+    fn ram_mut(&mut self) -> &mut Ram {
+        &mut self.ram
+    }
+
     fn create_cpu(&self, id: usize) -> Result<Self::Cpu<'_>, Self::CpuErr> {
         let id = id.try_into().unwrap();


@@ -1,6 +1,7 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
 use super::Console;
-use crate::vmm::hv::{CpuIo, IoBuf};
-use crate::vmm::hw::{DeviceContext, Ram};
+use crate::vmm::hv::{CpuIo, Hypervisor, IoBuf};
+use crate::vmm::hw::DeviceContext;
 use crate::vmm::VmmEvent;
 use obvirt::console::{Memory, MsgType};
 use std::error::Error;

@@ -8,18 +9,18 @@ use std::mem::offset_of;
 use thiserror::Error;
 
 /// Implementation of [`DeviceContext`].
-pub struct Context<'a> {
+pub struct Context<'a, H> {
     dev: &'a Console,
-    ram: &'a Ram,
+    hv: &'a H,
     msg_len: usize,
     msg: String,
 }
 
-impl<'a> Context<'a> {
-    pub fn new(dev: &'a Console, ram: &'a Ram) -> Self {
+impl<'a, H: Hypervisor> Context<'a, H> {
+    pub fn new(dev: &'a Console, hv: &'a H) -> Self {
         Self {
             dev,
-            ram,
+            hv,
             msg_len: 0,
             msg: String::new(),
         }

@@ -75,14 +76,14 @@ impl<'a> Context<'a> {
             .map_err(|e| ExecError::TranslateVaddrFailed(vaddr, e))?;
 
         // Read data.
-        let data = unsafe { self.ram.host_addr().add(paddr) };
+        let data = unsafe { self.hv.ram().host_addr().add(paddr) };
         let data = unsafe { std::slice::from_raw_parts(data, len) };
 
         Ok(std::str::from_utf8(data).unwrap())
     }
 }
 
-impl<'a> DeviceContext for Context<'a> {
+impl<'a, H: Hypervisor> DeviceContext for Context<'a, H> {
     fn exec(&mut self, exit: &mut dyn CpuIo) -> Result<bool, Box<dyn Error>> {
         // Check field.
         let off = exit.addr() - self.dev.addr;


@@ -1,5 +1,7 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
 use self::context::Context;
-use super::{Device, DeviceContext, Ram};
+use super::{Device, DeviceContext};
+use crate::vmm::hv::Hypervisor;
 use crate::vmm::VmmEventHandler;
 use obvirt::console::Memory;
 use std::num::NonZero;

@@ -26,7 +28,7 @@ impl Console {
     }
 }
 
-impl Device for Console {
+impl<H: Hypervisor> Device<H> for Console {
     fn addr(&self) -> usize {
         self.addr
     }

@@ -35,7 +37,7 @@ impl Device for Console {
         self.len
     }
 
-    fn create_context<'a>(&'a self, ram: &'a Ram) -> Box<dyn DeviceContext + 'a> {
-        Box::new(Context::new(self, ram))
+    fn create_context<'a>(&'a self, hv: &'a H) -> Box<dyn DeviceContext + 'a> {
+        Box::new(Context::new(self, hv))
     }
 }


@@ -1,28 +1,29 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+use super::hv::{CpuIo, Hypervisor};
 use super::VmmEventHandler;
-use crate::vmm::hv::CpuIo;
 use std::collections::BTreeMap;
 use std::error::Error;
 use std::num::NonZero;
 use std::sync::Arc;
 
 pub use self::console::*;
-pub use self::ram::*;
 
 mod console;
-mod ram;
 
-pub fn setup_devices(
+pub fn setup_devices<H: Hypervisor>(
     start_addr: usize,
     block_size: NonZero<usize>,
     event: VmmEventHandler,
-) -> DeviceTree {
-    let mut map = BTreeMap::<usize, Arc<dyn Device>>::new();
+) -> DeviceTree<H> {
+    let mut map = BTreeMap::<usize, Arc<dyn Device<H>>>::new();
 
     // Console.
     let addr = start_addr;
     let console = Arc::new(Console::new(addr, block_size, event));
 
-    assert!(map.insert(console.addr(), console.clone()).is_none());
+    assert!(map
+        .insert(<Console as Device<H>>::addr(&console), console.clone())
+        .is_none());
 
     // Make sure nothing are overlapped.
     let mut end = start_addr;

@@ -36,31 +37,31 @@ pub fn setup_devices(
 }
 
 /// Contains all virtual devices, except RAM; for the VM.
-pub struct DeviceTree {
+pub struct DeviceTree<H: Hypervisor> {
     console: Arc<Console>,
-    map: BTreeMap<usize, Arc<dyn Device>>,
+    map: BTreeMap<usize, Arc<dyn Device<H>>>,
 }
 
-impl DeviceTree {
-    pub fn console(&self) -> &Console {
-        &self.console
+impl<H: Hypervisor> DeviceTree<H> {
+    pub fn console(&self) -> &impl Device<H> {
+        self.console.as_ref()
     }
 
     /// Returns iterator ordered by physical address.
-    pub fn map(&self) -> impl Iterator<Item = (usize, &dyn Device)> + '_ {
+    pub fn map(&self) -> impl Iterator<Item = (usize, &dyn Device<H>)> + '_ {
         self.map.iter().map(|(addr, dev)| (*addr, dev.as_ref()))
     }
 }
 
 /// Virtual device that has a physical address in the virtual machine.
-pub trait Device: Send + Sync {
+pub trait Device<H: Hypervisor>: Send + Sync {
     /// Physical address in the virtual machine.
     fn addr(&self) -> usize;
 
     /// Total size of device memory, in bytes.
     fn len(&self) -> NonZero<usize>;
 
-    fn create_context<'a>(&'a self, ram: &'a Ram) -> Box<dyn DeviceContext + 'a>;
+    fn create_context<'a>(&'a self, hv: &'a H) -> Box<dyn DeviceContext + 'a>;
 }
 
 /// Context to execute memory-mapped I/O operations on a virtual device.

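One consequence of making `Device` generic shows up in `setup_devices` above: because `Console` now implements `Device<H>` for every `H`, a plain `console.addr()` is ambiguous, so the call must name the trait instance explicitly, hence `<Console as Device<H>>::addr(&console)`. The ambiguity in miniature:

    // Why setup_devices spells out <Console as Device<H>>::addr(&console).
    trait Device<H> {
        fn addr(&self) -> usize;
    }

    struct Console;

    // Console implements Device<H> for *every* H...
    impl<H> Device<H> for Console {
        fn addr(&self) -> usize {
            0x1000
        }
    }

    fn setup<H>() -> usize {
        let console = Console;
        // console.addr() would not compile: the compiler cannot choose an H.
        <Console as Device<H>>::addr(&console)
    }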

@@ -1,9 +1,10 @@
 // SPDX-License-Identifier: MIT OR Apache-2.0
 use self::hv::{Cpu, CpuExit, CpuFeats, CpuIo, Hypervisor};
-use self::hw::{setup_devices, Device, DeviceContext, DeviceTree, Ram, RamBuilder, RamMap};
+use self::hw::{setup_devices, Device, DeviceContext, DeviceTree};
 use self::kernel::{
     Kernel, PT_DYNAMIC, PT_GNU_EH_FRAME, PT_GNU_RELRO, PT_GNU_STACK, PT_LOAD, PT_NOTE, PT_PHDR,
 };
+use self::ram::{Ram, RamMap};
 use self::screen::Screen;
 use crate::error::RustError;
 use crate::profile::Profile;

@@ -28,6 +29,7 @@ mod arch;
 mod hv;
 mod hw;
 mod kernel;
+mod ram;
 mod screen;
 
 #[no_mangle]

@@ -312,16 +314,35 @@ pub unsafe extern "C" fn vmm_run(
         },
     };
 
-    // Setup RAM builder.
-    let mut ram = match RamBuilder::new(block_size) {
+    // Setup RAM.
+    let ram = match Ram::new(block_size) {
         Ok(v) => v,
         Err(e) => {
-            *err = RustError::wrap(e);
+            *err = RustError::with_source("couldn't create a RAM", e);
+            return null_mut();
+        }
+    };
+
+    // Setup hypervisor.
+    let mut hv = match self::hv::new(8, ram) {
+        Ok(v) => v,
+        Err(e) => {
+            *err = RustError::with_source("couldn't setup a hypervisor", e);
+            return null_mut();
+        }
+    };
+
+    // Load CPU features.
+    let feats = match hv.cpu_features() {
+        Ok(v) => v,
+        Err(e) => {
+            *err = RustError::with_source("couldn't get available vCPU features", e);
             return null_mut();
         }
     };
 
     // Map the kernel.
+    let mut ram = hv.ram_mut().builder();
     let kern = match ram.alloc_kernel(len) {
         Ok(v) => v,
         Err(e) => {

@@ -385,7 +406,7 @@ pub unsafe extern "C" fn vmm_run(
     }
 
     // Build RAM.
-    let (ram, map) = match ram.build(vm_page_size, &devices, dynamic) {
+    let map = match ram.build(vm_page_size, &devices, dynamic) {
         Ok(v) => v,
         Err(e) => {
             *err = RustError::with_source("couldn't build RAM", e);

@@ -393,25 +414,6 @@ pub unsafe extern "C" fn vmm_run(
         }
     };
 
-    // Setup hypervisor.
-    let ram = Arc::new(ram);
-    let mut hv = match self::hv::new(8, ram.clone()) {
-        Ok(v) => v,
-        Err(e) => {
-            *err = RustError::with_source("couldn't setup a hypervisor", e);
-            return null_mut();
-        }
-    };
-
-    // Load CPU features.
-    let feats = match hv.cpu_features() {
-        Ok(v) => v,
-        Err(e) => {
-            *err = RustError::with_source("couldn't get available vCPU features", e);
-            return null_mut();
-        }
-    };
-
     // Setup screen.
     let screen = match self::screen::Default::new(screen) {
         Ok(v) => v,

@@ -425,7 +427,6 @@ pub unsafe extern "C" fn vmm_run(
     let shutdown = Arc::new(AtomicBool::new(false));
     let args = CpuArgs {
         hv,
-        ram,
         screen: screen.buffer().clone(),
         feats,
         devices,

@@ -514,7 +515,7 @@ fn run_cpu<C: Cpu, H: Hypervisor>(mut cpu: C, args: &CpuArgs<H>) {
         .map(|(addr, dev)| {
             let end = dev.len().checked_add(addr).unwrap();
 
-            (addr, (dev.create_context(&args.ram), end))
+            (addr, (dev.create_context(&args.hv), end))
         })
         .collect::<BTreeMap<usize, (Box<dyn DeviceContext>, NonZero<usize>)>>();

@@ -666,19 +667,15 @@ impl From<MsgType> for VmmLog {
 /// Encapsulates arguments for a function to run a CPU.
 struct CpuArgs<H: Hypervisor> {
     hv: H,
-    ram: Arc<Ram>,
     screen: Arc<<self::screen::Default as Screen>::Buffer>,
     feats: CpuFeats,
-    devices: Arc<DeviceTree>,
+    devices: Arc<DeviceTree<H>>,
     shutdown: Arc<AtomicBool>,
 }
 
 /// Represents an error when [`vmm_new()`] fails.
 #[derive(Debug, Error)]
 enum VmmError {
-    #[error("couldn't create a RAM")]
-    CreateRamFailed(#[source] std::io::Error),
-
     #[cfg(target_os = "linux")]
     #[error("couldn't get maximum number of CPU for a VM")]
     GetMaxCpuFailed(#[source] std::io::Error),

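The net effect on `vmm_run` is a reordering: RAM and the hypervisor are created before the kernel is loaded, and RAM is built through a borrow instead of by value. In outline (a sketch; the real code matches on each `Result` and reports failures through `*err`):

    let ram = Ram::new(block_size)?;       // 1. reserve guest RAM
    let mut hv = self::hv::new(8, ram)?;   // 2. hypervisor takes ownership
    let feats = hv.cpu_features()?;        // 3. query vCPU features early
    let mut ram = hv.ram_mut().builder();  // 4. builder borrows &mut Ram
    // ... alloc_kernel / stack / args as before ...
    let map = ram.build(vm_page_size, &devices, dynamic)?; // 5. returns RamMap only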

@@ -1,71 +1,31 @@
 // SPDX-License-Identifier: MIT OR Apache-2.0
 use super::{Ram, RamError};
+use crate::vmm::hv::Hypervisor;
 use crate::vmm::hw::DeviceTree;
 use crate::vmm::kernel::ProgramHeader;
-use crate::vmm::VmmError;
 use obconf::{BootEnv, Config};
 use std::num::NonZero;
 use std::ops::Range;
 use thiserror::Error;
 
 /// Struct to build [`Ram`].
-pub struct RamBuilder {
-    ram: Ram,
+pub struct RamBuilder<'a> {
+    ram: &'a mut Ram,
     next: usize,
     kern: Option<Range<usize>>,
     stack: Option<Range<usize>>,
     args: Option<KernelArgs>,
 }
 
-impl RamBuilder {
-    /// # Safety
-    /// `block_size` must be greater or equal host page size.
-    pub unsafe fn new(block_size: NonZero<usize>) -> Result<Self, VmmError> {
-        use std::io::Error;
-
-        // Reserve memory range.
-        #[cfg(unix)]
-        let mem = {
-            use libc::{mmap, MAP_ANON, MAP_FAILED, MAP_PRIVATE, PROT_NONE};
-            use std::ptr::null_mut;
-
-            let mem = mmap(
-                null_mut(),
-                Ram::SIZE,
-                PROT_NONE,
-                MAP_PRIVATE | MAP_ANON,
-                -1,
-                0,
-            );
-
-            if mem == MAP_FAILED {
-                return Err(VmmError::CreateRamFailed(Error::last_os_error()));
-            }
-
-            mem.cast()
-        };
-
-        #[cfg(windows)]
-        let mem = {
-            use std::ptr::null;
-            use windows_sys::Win32::System::Memory::{VirtualAlloc, MEM_RESERVE, PAGE_NOACCESS};
-
-            let mem = VirtualAlloc(null(), Ram::SIZE, MEM_RESERVE, PAGE_NOACCESS);
-
-            if mem.is_null() {
-                return Err(VmmError::CreateRamFailed(Error::last_os_error()));
-            }
-
-            mem.cast()
-        };
-
-        Ok(Self {
-            ram: Ram { mem, block_size },
+impl<'a> RamBuilder<'a> {
+    pub(super) fn new(ram: &'a mut Ram) -> Self {
+        Self {
+            ram,
             next: 0,
             kern: None,
             stack: None,
             args: None,
-        })
+        }
     }
 
     /// # Panics

@@ -212,13 +172,13 @@ impl RamBuilder {
 }
 
 #[cfg(target_arch = "x86_64")]
-impl RamBuilder {
-    pub fn build(
+impl<'a> RamBuilder<'a> {
+    pub fn build<H: Hypervisor>(
         mut self,
         page_size: NonZero<usize>,
-        devices: &DeviceTree,
+        devices: &DeviceTree<H>,
         dynamic: ProgramHeader,
-    ) -> Result<(Ram, RamMap), RamBuilderError> {
+    ) -> Result<RamMap, RamBuilderError> {
         // Allocate page-map level-4 table. We use 4K 4-Level Paging here. You may wonder about this
         // because it seems like page size on the PS4 is 16K. The truth is the PS4 emulate the 16K
         // page size with 4K pages. You can check this by yourself by looking at

@@ -292,7 +252,7 @@ impl RamBuilder {
 
         unsafe { self.relocate_kernel(&map, dynamic, 8)? };
 
-        Ok((self.ram, map))
+        Ok(map)
     }
 
     fn setup_4k_page_tables(

@@ -393,17 +353,17 @@ impl RamBuilder {
 }
 
 #[cfg(target_arch = "aarch64")]
-impl RamBuilder {
+impl<'a> RamBuilder<'a> {
     const MA_DEV_NG_NR_NE: u8 = 0; // MEMORY_ATTRS[0]
     const MA_NOR: u8 = 1; // MEMORY_ATTRS[1]
     const MEMORY_ATTRS: [u8; 8] = [0, 0b11111111, 0, 0, 0, 0, 0, 0];
 
-    pub fn build(
+    pub fn build<H: Hypervisor>(
         mut self,
         page_size: NonZero<usize>,
-        devices: &DeviceTree,
+        devices: &DeviceTree<H>,
         dynamic: ProgramHeader,
-    ) -> Result<(Ram, RamMap), RamBuilderError> {
+    ) -> Result<RamMap, RamBuilderError> {
         // Setup page tables.
         let map = match page_size.get() {
             0x4000 => self.build_16k_page_tables(devices)?,

@@ -413,10 +373,13 @@ impl RamBuilder {
 
         // Relocate the kernel to virtual address.
         unsafe { self.relocate_kernel(&map, dynamic, 1027)? };
 
-        Ok((self.ram, map))
+        Ok(map)
     }
 
-    fn build_16k_page_tables(&mut self, devices: &DeviceTree) -> Result<RamMap, RamBuilderError> {
+    fn build_16k_page_tables<H: Hypervisor>(
+        &mut self,
+        devices: &DeviceTree<H>,
+    ) -> Result<RamMap, RamBuilderError> {
         // Allocate page table level 0.
         let page_table = self.next;
         let len = self.ram.block_size;

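Since `RamBuilder` now borrows RAM, the mutable borrow ends when `build` consumes the builder, after which the hypervisor can hand out shared references again. The lifetime pattern in isolation (names as in the diff; arguments as in `vmm_run`):

    // Sketch: the builder mutably borrows RAM and releases it when build() returns.
    let map = {
        let builder = hv.ram_mut().builder();         // &mut Ram borrowed here
        builder.build(page_size, &devices, dynamic)?  // borrow ends with the builder
    };
    let ram = hv.ram(); // shared access is available again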

@@ -22,6 +22,50 @@ pub struct Ram {
 impl Ram {
     pub(crate) const SIZE: usize = 1024 * 1024 * 1024 * 8; // 8GB
 
+    /// # Safety
+    /// `block_size` must be greater or equal host page size.
+    pub unsafe fn new(block_size: NonZero<usize>) -> Result<Self, Error> {
+        use std::io::Error;
+
+        // Reserve memory range.
+        #[cfg(unix)]
+        let mem = {
+            use libc::{mmap, MAP_ANON, MAP_FAILED, MAP_PRIVATE, PROT_NONE};
+            use std::ptr::null_mut;
+
+            let mem = mmap(
+                null_mut(),
+                Self::SIZE,
+                PROT_NONE,
+                MAP_PRIVATE | MAP_ANON,
+                -1,
+                0,
+            );
+
+            if mem == MAP_FAILED {
+                return Err(Error::last_os_error());
+            }
+
+            mem.cast()
+        };
+
+        #[cfg(windows)]
+        let mem = {
+            use std::ptr::null;
+            use windows_sys::Win32::System::Memory::{VirtualAlloc, MEM_RESERVE, PAGE_NOACCESS};
+
+            let mem = VirtualAlloc(null(), Self::SIZE, MEM_RESERVE, PAGE_NOACCESS);
+
+            if mem.is_null() {
+                return Err(Error::last_os_error());
+            }
+
+            mem.cast()
+        };
+
+        Ok(Self { mem, block_size })
+    }
+
     pub fn host_addr(&self) -> *const u8 {
         self.mem
     }

@@ -30,6 +74,10 @@ impl Ram {
         Self::SIZE
     }
 
+    pub fn builder(&mut self) -> RamBuilder {
+        RamBuilder::new(self)
+    }
+
     /// # Panics
     /// If `addr` or `len` is not multiply by block size.
     ///

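`Ram::new` only reserves the 8 GB range (`PROT_NONE` on Unix, `MEM_RESERVE` on Windows); no memory is committed up front, which is why `block_size` must be at least the host page size. A standalone sketch of the same reserve-then-commit idea on Unix; the commit step via `mprotect` is an assumption for illustration, since the actual commit path lives in `Ram`'s allocation methods, which this diff does not touch:

    // Reserve-then-commit sketch (Unix). Assumption: commit uses mprotect.
    use libc::{mmap, mprotect, MAP_ANON, MAP_FAILED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE};
    use std::io::Error;
    use std::ptr::null_mut;

    const SIZE: usize = 1024 * 1024 * 1024 * 8; // 8GB, as in Ram::SIZE

    unsafe fn reserve() -> Result<*mut u8, Error> {
        // Reserve address space only; touching it now would fault.
        let mem = mmap(null_mut(), SIZE, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
        if mem == MAP_FAILED {
            return Err(Error::last_os_error());
        }
        Ok(mem.cast())
    }

    unsafe fn commit(addr: *mut u8, len: usize) -> Result<(), Error> {
        // Make a page-aligned sub-range readable and writable.
        if mprotect(addr.cast(), len, PROT_READ | PROT_WRITE) < 0 {
            return Err(Error::last_os_error());
        }
        Ok(())
    }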

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: MIT OR Apache-2.0
 use super::hv::{Cpu, CpuFeats, CpuStates};
-use super::hw::RamMap;
+use super::ram::RamMap;
 use super::MainCpuError;
 
 pub fn setup_main_cpu(