Finish out the xenon-enet implementation

Dr. Chat 2021-10-24 00:33:47 -05:00
parent f34caf306d
commit 33c45897d4
7 changed files with 342 additions and 100 deletions

Cargo.lock (generated)

@@ -17,16 +17,49 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "buddyalloc"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5ded974e2422fae8075cb72650eae5b12f076e480eb44930571f6bddf325b31"
[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "core_reqs"
version = "0.1.0"
[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
"cfg-if",
]
[[package]]
name = "managed"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c75de51135344a4f8ed3cfe2720dc27736f7711989703a0b43aadf3753c55577"
[[package]]
name = "proc-macro-hack"
version = "0.5.19"
@@ -51,6 +84,18 @@ dependencies = [
"proc-macro2",
]
[[package]]
name = "smoltcp"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e4a069bef843d170df47e7c0a8bf8d037f217d9f5b325865acc3e466ffe40d3"
dependencies = [
"bitflags",
"byteorder",
"log",
"managed",
]
[[package]]
name = "stage1"
version = "0.1.0"
@@ -117,6 +162,14 @@ checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
name = "xenon-cpu"
version = "0.1.0"
[[package]]
name = "xenon-enet"
version = "0.1.0"
dependencies = [
"smoltcp",
"xenon-cpu",
]
[[package]]
name = "xenon-soc"
version = "0.1.0"

Cargo.toml

@@ -5,7 +5,7 @@ members = [
"boot/stage1",
"shared/core_reqs",
"shared/xenon-cpu",
# "shared/xenon-enet",
"shared/xenon-enet",
"shared/xenon-soc",
"shared/sync",
]

boot/stage1/Cargo.toml

@@ -1,7 +1,7 @@
[package]
name = "stage1"
version = "0.1.0"
edition = "2018"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

boot/stage1/src/main.rs

@@ -1,6 +1,5 @@
#![feature(
alloc_error_handler,
const_panic,
const_ptr_offset_from,
global_asm,
lang_items,

shared/xenon-enet/Cargo.toml

@@ -1,12 +1,12 @@
[package]
name = "xenon-enet"
version = "0.1.0"
edition = "2018"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
smoltcp = { version = "0.7.3", default-features = false, features = [
smoltcp = { version = "0.7.5", default-features = false, features = [
"log", "proto-ipv4", "proto-ipv6"
] }

shared/xenon-enet/src/lib.rs

@@ -1,9 +1,14 @@
#![no_std]
#![feature(iter_zip)]
pub mod ring;
mod ring;
use core::{marker::PhantomData, pin::Pin, ptr::NonNull, sync::atomic::AtomicU32, time::Duration};
extern crate alloc;
use ring::{Ring, RxRing, TxRing};
use alloc::boxed::Box;
use core::{ptr::NonNull, time::Duration};
use smoltcp::phy::{self, Device};
#[allow(dead_code)]
@@ -30,21 +35,25 @@ enum Register {
Address1 = 0x7A,
}
/// The type of a transmission ring.
pub enum RingType {
Rx,
Tx,
}
// Flag bit guesses:
// 0x8000_0000: Hardware ownership bit
// 0x4000_0000: ??
// 0x0020_0000: (TX) last buffer?
// 0x0020_0000: (TX) last buffer? e.g. packet not split
// 0x0002_0000: (TX) interrupt related?
// 0x0001_0000: (TX) interrupt related?
const HWDESC_FLAG_HW_OWNED: u32 = 0x80000000;
const HWDESC_LAST_ENTRY: u32 = 0x80000000; // N.B: This is set in the `capacity` field.
const HWDESC_CAP_LAST_ENTRY: u32 = 0x80000000; // N.B: This is set in the `capacity` field.
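// e.g. the last descriptor of a ring backed by a 2048-byte buffer would carry
// capacity = HWDESC_CAP_LAST_ENTRY | 2048 = 0x8000_0800.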
#[repr(C, align(2048))]
#[derive(Clone)]
pub struct EthernetBuffer([u8; 2048]);
impl Default for EthernetBuffer {
fn default() -> Self {
Self([0u8; 2048])
}
}
/// Transfer descriptor, as defined by hardware.
///
@@ -56,18 +65,25 @@ const HWDESC_LAST_ENTRY: u32 = 0x80000000; // N.B: This is set in the `capacity`
/// * Busy: Owned by hardware; pending packet RX
/// * TX
/// * Free: Descriptor is free for queueing a network TX.
/// * Transmitted packet contained within; can free buffer.
/// * No transmit buffer set.
/// * Busy: Owned by hardware; pending packet TX
#[repr(C, align(16))]
#[derive(Clone, Copy)]
struct HwDescriptor {
/// Length of the packet contained within `addr`, if any.
len: u32,
/// Flags interpreted by the hardware, such as an ownership bit or interrupt
/// routing bits.
flags: u32,
/// Physical address of an in-memory buffer used to contain the packet.
addr: u32,
/// Capacity of the in-memory buffer, with the high bit aliased as an "end-of-ring" bit.
capacity: u32,
}
impl HwDescriptor {
pub fn new() -> Self {
fn new() -> Self {
Self {
len: 0,
flags: 0,
@@ -76,8 +92,9 @@ impl HwDescriptor {
}
}
pub fn is_free(&self) -> bool {
(self.flags & HWDESC_FLAG_HW_OWNED) == 0
/// Query whether this descriptor is currently busy (i.e. owned by hardware).
fn is_busy(&self) -> bool {
(unsafe { core::ptr::read_volatile(&self.flags) } & HWDESC_FLAG_HW_OWNED) != 0
}
}
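To make the lifecycle above concrete, here is a minimal sketch of the ownership handoff for a single RX descriptor, assuming the flag bit guesses above hold; `hand_to_hardware`, `slot`, and `dma_buf` are hypothetical names used only for illustration:
```rust,ignore
unsafe fn hand_to_hardware(slot: &mut HwDescriptor, dma_buf: &mut EthernetBuffer) {
    // Fill in the buffer address and capacity while software still owns the descriptor.
    core::ptr::write_volatile(&mut slot.len, 0);
    core::ptr::write_volatile(&mut slot.addr, dma_buf.0.as_mut_ptr() as u32);
    let cap = (core::ptr::read_volatile(&slot.capacity) & HWDESC_CAP_LAST_ENTRY)
        | dma_buf.0.len() as u32;
    core::ptr::write_volatile(&mut slot.capacity, cap);

    // Flip the ownership bit last, so the MAC never observes a half-written descriptor.
    core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
    core::ptr::write_volatile(&mut slot.flags, HWDESC_FLAG_HW_OWNED);

    // From here the descriptor is hands-off; the MAC clears the ownership bit once a
    // packet has landed in `dma_buf`, which software detects by polling `is_busy()`.
}
```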
@@ -85,8 +102,8 @@ impl HwDescriptor {
pub struct EthernetDevice<const N: usize, const M: usize> {
mmio: core::ptr::NonNull<u8>,
rx_ring: ring::Ring<ring::RxRing, N>,
tx_ring: ring::Ring<ring::TxRing, M>,
rx_ring: Ring<RxRing, N>,
tx_ring: Ring<TxRing, M>,
}
impl<const N: usize, const M: usize> EthernetDevice<N, M> {
@@ -94,15 +111,12 @@ impl<const N: usize, const M: usize> EthernetDevice<N, M> {
///
/// SAFETY: The caller _MUST_ ensure that there is only one instance
/// of this object at a time. Multiple instances will cause undefined behavior.
pub unsafe fn new(
rx_ring: ring::Ring<ring::RxRing, N>,
tx_ring: ring::Ring<ring::TxRing, M>,
) -> Self {
pub unsafe fn new() -> Self {
let mut obj = Self {
mmio: NonNull::new_unchecked(0x8000_0200_EA00_1400 as *mut u8),
rx_ring,
tx_ring,
rx_ring: Ring::new(),
tx_ring: Ring::new(),
};
obj.reset();
@@ -143,17 +157,48 @@ impl<const N: usize, const M: usize> EthernetDevice<N, M> {
}
}
/*
impl<'a> Device<'a> for EthernetDevice {
type RxToken = EthernetRxToken<'a>;
type TxToken = EthernetTxToken<'a>;
/// Represents a token that, when consumed, yields a received packet.
pub struct EthernetRxToken<'ring, const N: usize>(ring::CompleteDescriptor<'ring, ring::RxRing, N>);
fn receive(&'a mut self) -> Option<(Self::RxToken, Self::TxToken)> {
None
/// Represents a token that, when consumed, fills a freshly allocated buffer with a packet and queues it for transmission.
pub struct EthernetTxToken<'ring, const M: usize>(ring::FreeDescriptor<'ring, ring::TxRing, M>);
// Implement the smoltcp interface to the Xenon ethernet device.
impl<'dev, const N: usize, const M: usize> Device<'dev> for EthernetDevice<N, M> {
type RxToken = EthernetRxToken<'dev, N>;
type TxToken = EthernetTxToken<'dev, M>;
fn receive(&'dev mut self) -> Option<(Self::RxToken, Self::TxToken)> {
// Free up completed TX descriptors.
while let Some(desc) = self.tx_ring.get_next_complete() {
// Free the desc, drop the inner buffer. Maybe attempt to reuse it in the future.
desc.free();
}
// Requeue free RX descriptors.
while let Some(desc) = self.rx_ring.get_next_free() {
let buf = Box::new(EthernetBuffer::default());
// Submit the descriptor back to hardware.
desc.submit(buf);
}
Some((
EthernetRxToken(self.rx_ring.get_next_complete()?),
EthernetTxToken(self.tx_ring.get_next_free()?),
))
}
fn transmit(&'a mut self) -> Option<Self::TxToken> {
None
fn transmit(&'dev mut self) -> Option<Self::TxToken> {
// Free up completed TX descriptors.
while let Some(desc) = self.tx_ring.get_next_complete() {
// Free the desc, drop the inner buffer. Maybe attempt to reuse it in the future.
desc.free();
}
// Now try to get the next free entry again. In most cases, it will point to an
// entry we just freed.
Some(EthernetTxToken(self.tx_ring.get_next_free()?))
}
fn capabilities(&self) -> smoltcp::phy::DeviceCapabilities {
@@ -166,26 +211,31 @@ impl<'a> Device<'a> for EthernetDevice {
}
}
impl<'a> phy::RxToken for EthernetRxToken<'a> {
fn consume<R, F>(self, timestamp: smoltcp::time::Instant, f: F) -> smoltcp::Result<R>
impl<'a, const N: usize> phy::RxToken for EthernetRxToken<'a, N> {
fn consume<R, F>(self, _timestamp: smoltcp::time::Instant, f: F) -> smoltcp::Result<R>
where
F: FnOnce(&mut [u8]) -> smoltcp::Result<R>,
{
todo!()
let (mut buf, len) = self.0.free();
f(&mut buf.0[..len])
}
}
impl<'a> phy::TxToken for EthernetTxToken<'a> {
impl<'a, const M: usize> phy::TxToken for EthernetTxToken<'a, M> {
fn consume<R, F>(
self,
timestamp: smoltcp::time::Instant,
_timestamp: smoltcp::time::Instant,
len: usize,
f: F,
) -> smoltcp::Result<R>
where
F: FnOnce(&mut [u8]) -> smoltcp::Result<R>,
{
todo!()
let mut buf = Box::new(EthernetBuffer::default());
let res = f(&mut buf.0[..len])?;
self.0.submit(buf, len);
Ok(res)
}
}
*/
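For context, here is a rough sketch of how this device could be hooked up to smoltcp 0.7's interface layer. The ring sizes, MAC address, IP address, and `now_ms()` timebase are placeholders, and it assumes the relevant smoltcp features (e.g. `ethernet` plus a socket type) are enabled on top of those listed in Cargo.toml:
```rust,ignore
use smoltcp::iface::{EthernetInterfaceBuilder, NeighborCache};
use smoltcp::socket::SocketSet;
use smoltcp::time::Instant;
use smoltcp::wire::{EthernetAddress, IpAddress, IpCidr};

// SAFETY: only one EthernetDevice may exist at a time.
let device = unsafe { EthernetDevice::<16, 16>::new() };

let mut neighbor_entries = [None; 8];
let mut ip_addrs = [IpCidr::new(IpAddress::v4(192, 168, 1, 99), 24)];
let mut iface = EthernetInterfaceBuilder::new(device)
    .ethernet_addr(EthernetAddress([0x00, 0x22, 0x48, 0x00, 0x00, 0x01]))
    .neighbor_cache(NeighborCache::new(&mut neighbor_entries[..]))
    .ip_addrs(&mut ip_addrs[..])
    .finalize();

let mut socket_entries: [_; 4] = Default::default();
let mut sockets = SocketSet::new(&mut socket_entries[..]);
loop {
    // Drives receive()/transmit() above; now_ms() stands in for the platform timebase.
    let _ = iface.poll(&mut sockets, Instant::from_millis(now_ms()));
}
```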

shared/xenon-enet/src/ring.rs

@@ -1,102 +1,242 @@
//! This module contains code for ethernet ring management.
use super::{EthernetBuffer, HwDescriptor};
use core::marker::PhantomData;
use core::{marker::PhantomData, sync::atomic::Ordering};
pub struct EthernetPendingDesc<'a>(&'a mut HwDescriptor, &'a mut [u8]);
extern crate alloc;
use alloc::boxed::Box;
pub struct EthernetDescBuilder<'a>(&'a mut HwDescriptor, &'a mut [u8]);
/// An individual "logical" descriptor, used to track extra information
/// associated with hardware descriptors.
#[derive(Default, Clone)]
struct LogicalDescriptor {
/// The managed heap buffer assigned to this descriptor, if any.
buf: Option<Box<EthernetBuffer>>,
}
impl<'a> EthernetDescBuilder<'a> {
fn new(desc: &'a mut HwDescriptor, buf: &'static mut [u8]) -> Self {
desc.capacity = (desc.capacity & 0x80000000) | ((buf.len() as u32) & 0x7FFFFFFF);
desc.addr = buf.as_mut_ptr() as u32;
Self(desc, buf)
}
pub fn set_flags(self, flags: u32) -> Self {
self.0.flags = flags;
self
}
pub fn commit(self) -> EthernetPendingDesc<'a> {
self.0.flags |= super::HWDESC_FLAG_HW_OWNED;
EthernetPendingDesc(self.0, self.1)
impl core::fmt::Debug for LogicalDescriptor {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("LogicalDescriptor")
.field("buf", &self.buf.is_some())
.finish()
}
}
/// Represents an ethernet descriptor that has pending data.
pub struct EthernetCompleteDesc<'a>(&'a mut HwDescriptor);
pub trait RingType {}
/// Receive ring marker
pub struct RxRing;
/// Transfer ring marker
pub struct TxRing;
trait RingType {}
impl RingType for RxRing {}
impl RingType for TxRing {}
/// This structure represents a ring of DMA buffer descriptors for a Xenon MAC.
///
/// # Usage (RX)
/// ```rust,ignore
/// let mut ring: Ring<RxRing, 16>;
/// # Hardware interaction
/// Hardware and software may both access descriptors in the ring at the same time.
///
/// match ring.next_free() {
/// Some(desc) => {},
/// None => {}
/// }
/// ```
pub struct Ring<T: RingType, const N: usize> {
ring_type: PhantomData<T>,
/// When a descriptor is ready for hardware processing, the ownership bit is flipped
/// such that the hardware now "owns" the descriptor.
/// Processing means sending a packet for TX, or the reception of a packet for RX.
///
/// When the hardware owns the descriptor, we may not touch it whatsoever.
/// As such, this interface offers no way to retrieve hardware-owned descriptors.
///
/// When a descriptor is finished processing, hardware will turn off the ownership
/// bit, handing ownership back to us. At this point, we can take the buffer out of
/// the descriptor and process or free it.
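/// # Usage (sketch)
///
/// A rough sketch of the intended RX/TX flow; the ring sizes and the 64-byte TX length
/// below are illustrative only:
/// ```rust,ignore
/// let mut rx: Ring<RxRing, 16> = Ring::new();
/// let mut tx: Ring<TxRing, 16> = Ring::new();
///
/// // RX: keep hardware fed with empty buffers, then drain completed packets.
/// while let Some(slot) = rx.get_next_free() {
///     slot.submit(Box::new(EthernetBuffer::default()));
/// }
/// if let Some(done) = rx.get_next_complete() {
///     let (buf, len) = done.free();
///     // ... hand &buf.0[..len] to the network stack ...
/// }
///
/// // TX: queue a filled buffer, then reclaim it once hardware has sent it.
/// if let Some(slot) = tx.get_next_free() {
///     slot.submit(Box::new(EthernetBuffer::default()), 64);
/// }
/// while let Some(done) = tx.get_next_complete() {
///     let _ = done.free();
/// }
/// ```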
pub struct Ring<S: RingType, const N: usize> {
_ring_type: PhantomData<S>,
/// A contiguous array of hardware descriptors. The hardware will receive a pointer to this.
descriptors: [HwDescriptor; N],
/// This _assumes_ that the MMU is disabled, and `va == pa`.
hw_descriptors: Box<[HwDescriptor; N]>,
/// Index of first busy buffer (or represents no busy buffers if equivalent to `avail`)
busy: usize,
/// Index of first free buffer
avail: usize,
/// Associated logical descriptors, tracking extra information that can't live inside
/// of the hardware descriptors.
descriptors: [LogicalDescriptor; N],
/// The next busy descriptor, without wraparound.
next_busy: usize,
/// The next free descriptor, without wraparound. If `next_free` == `next_busy`, all descriptors are free.
next_free: usize,
}
impl<T: RingType, const N: usize> Ring<T, N> {
fn new() -> Self {
let mut descriptors = [HwDescriptor::new(); N];
descriptors.last_mut().unwrap().capacity = super::HWDESC_LAST_ENTRY;
impl<S: RingType, const N: usize> Ring<S, N> {
/// Construct a new ethernet ring, with an allocation backed by the global allocator.
pub fn new() -> Self {
let mut hw_descriptors = Box::new([HwDescriptor::new(); N]);
hw_descriptors.last_mut().unwrap().capacity = super::HWDESC_CAP_LAST_ENTRY;
const LOGDESC_INIT: LogicalDescriptor = LogicalDescriptor { buf: None };
Self {
ring_type: PhantomData,
descriptors: [HwDescriptor::new(); N],
_ring_type: PhantomData,
hw_descriptors,
descriptors: [LOGDESC_INIT; N],
busy: 0,
avail: 0,
next_busy: 0,
next_free: 0,
}
}
pub fn new_rx(buffers: [EthernetBuffer; N]) -> Self {
let mut obj = Ring::new();
/// Retrieve the next unused descriptor, if any.
pub fn get_next_free<'ring>(&'ring mut self) -> Option<FreeDescriptor<'ring, S, N>> {
// If `next_free` is >= `N` slots away from `next_busy`,
// the entire ring has been consumed.
if self.next_free - self.next_busy >= N {
None
} else {
// N.B: Do not increment `next_free` here. The descriptor must do that when submitted.
// Because of the mutable borrow against `self`, callers cannot fetch more than
// one descriptor at a time.
let idx = self.next_free % N;
for (mut desc, buf) in core::iter::zip(obj.descriptors, buffers) {
desc.addr = buf.0.as_mut_ptr() as u32;
Some(FreeDescriptor { ring: self, idx })
}
}
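// Worked example with N = 4: next_busy = 5 and next_free = 7 leaves two descriptors in
// flight, so the next free slot is 7 % 4 = 3 and the next completion check looks at
// 5 % 4 = 1; once next_free - next_busy reaches 4, the ring is full and this returns None.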
/// Retrieve the next completed descriptor, if any.
pub fn get_next_complete(&mut self) -> Option<CompleteDescriptor<'_, S, N>> {
if self.next_busy == self.next_free {
None
} else {
let idx = self.next_busy % N;
// Now, check whether the HW ownership bit is set.
// If so, do not return a reference.
if self.hw_descriptors[idx].is_busy() {
None
} else {
Some(CompleteDescriptor { ring: self, idx })
}
}
}
}
unsafe fn read_mod_write_volatile<T>(addr: *mut T, func: impl FnOnce(T) -> T) {
let oval = core::ptr::read_volatile(addr);
let nval = func(oval);
core::ptr::write_volatile(addr, nval);
}
/// Represents a safe interface for a particular free hardware descriptor.
pub struct FreeDescriptor<'a, S: RingType, const N: usize> {
/// The ring that owns this descriptor.
ring: &'a mut Ring<S, N>,
/// The wrapped-around descriptor index.
idx: usize,
}
// Actions corresponding to a free descriptor on the RX ring.
impl<'a, const N: usize> FreeDescriptor<'a, RxRing, N> {
/// Submit this descriptor to hardware.
pub fn submit(self, buf: Box<EthernetBuffer>) {
// Update the hardware descriptor.
let hw_desc = &mut self.ring.hw_descriptors[self.idx];
unsafe {
core::ptr::write_volatile(&mut hw_desc.len, 0); // RX: 0 bytes initial length
core::ptr::write_volatile(&mut hw_desc.addr, buf.0.as_ptr() as u32);
read_mod_write_volatile(&mut hw_desc.capacity, |v| {
// N.B: Avoid overwriting HWDESC_CAP_LAST_ENTRY.
(v & super::HWDESC_CAP_LAST_ENTRY) | (buf.0.len() as u32 & 0x7FFF_FFFF)
});
// Prevent reordering of the above writes and the below ownership flag modification.
core::sync::atomic::fence(Ordering::SeqCst);
// TODO: Figure out what magic bit 0x4000_0000 is.
core::ptr::write_volatile(
&mut hw_desc.flags,
super::HWDESC_FLAG_HW_OWNED | 0x4000_0000,
);
}
obj
// Hand the buffer over to the logical descriptor so it stays alive while hardware owns it.
self.ring.descriptors[self.idx].buf.replace(buf);
self.ring.next_free += 1;
}
}
// Actions corresponding to a free descriptor on the TX ring.
impl<'a, const N: usize> FreeDescriptor<'a, TxRing, N> {
/// Submit this descriptor to hardware.
pub fn submit(self, buf: Box<EthernetBuffer>, len: usize) {
// Update the hardware descriptor.
let hw_desc = &mut self.ring.hw_descriptors[self.idx];
unsafe {
core::ptr::write_volatile(&mut hw_desc.len, len as u32);
core::ptr::write_volatile(&mut hw_desc.addr, buf.0.as_ptr() as u32);
read_mod_write_volatile(&mut hw_desc.capacity, |v| {
// N.B: Avoid overwriting HWDESC_CAP_LAST_ENTRY.
(v & super::HWDESC_CAP_LAST_ENTRY) | (buf.0.len() as u32 & 0x7FFF_FFFF)
});
// Prevent reordering of the above writes and the below ownership flag modification.
core::sync::atomic::fence(Ordering::SeqCst);
// TODO: Figure out the magic bits 0x4023_0000.
core::ptr::write_volatile(
&mut hw_desc.flags,
super::HWDESC_FLAG_HW_OWNED | 0x4023_0000,
);
}
// Update the logical descriptor.
self.ring.descriptors[self.idx].buf.replace(buf);
self.ring.next_free += 1;
}
}
/// Represents a safe interface for a particular completed hardware descriptor.
pub struct CompleteDescriptor<'a, S: RingType, const N: usize> {
ring: &'a mut Ring<S, N>,
idx: usize,
}
impl<'a, S: RingType, const N: usize> CompleteDescriptor<'a, S, N> {
// take() function for contained buffer
// -> tx, caller takes buffer and frees or reuses
// -> rx, caller submits packet to netstack and (typically) reuses buffer
// submit() to transfer this descriptor to HW for processing
/// Mark a previously finished descriptor as free, taking the buffer out of it.
/// This returns a tuple of the buffer and the length used by hardware.
pub fn free(self) -> (Box<EthernetBuffer>, usize) {
// Clear out the descriptor.
let hw_desc = &mut self.ring.hw_descriptors[self.idx];
let len = unsafe {
core::ptr::write_volatile(&mut hw_desc.addr, 0x0BADF00D);
core::ptr::read_volatile(&hw_desc.len)
};
// Take the buffer from the logical descriptor.
let buf = self.ring.descriptors[self.idx]
.buf
.take()
.expect("no buffer in completed descriptor");
self.ring.next_busy += 1;
(buf, len as usize)
}
}
/// This implements methods specific to an RX descriptor ring.
impl<const N: usize> Ring<RxRing, N> {
/// Attempt to consume a descriptor and swap its buffer with
/// the input buffer.
pub fn consume() -> EthernetBuffer {}
// ref empty descriptors
// -> fill with memory buffers
// ref completed descriptors
// -> take and maybe replace packet buffer
// no ref of hardware-owned descriptors
}
/// This implements methods specific to a TX descriptor ring.
impl<const N: usize> Ring<TxRing, N> {
pub fn transmit_buffer(buffer: EthernetBuffer) -> Result<(), EthernetBuffer> {
// TODO:
// Attempt to find a free descriptor, or error out if none are free.
Ok(())
}
// ref empty descriptors
// -> put buffer into ring for future tx
// ref completed descriptors
// -> take buffer and free to heap (or reuse)
// no ref of hardware-owned descriptors
}