use std::mem::size_of;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering;
use remain::sorted;
use thiserror::Error;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use vm_memory::GuestMemoryError;
use zerocopy::AsBytes;
use super::xhci_abi::EventRingSegmentTableEntry;
use super::xhci_abi::Trb;
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
#[error("event ring has a bad enqueue pointer: {0}")]
BadEnqueuePointer(GuestAddress),
#[error("event ring has a bad seg table addr: {0}")]
BadSegTableAddress(GuestAddress),
#[error("event ring has a bad seg table index: {0}")]
BadSegTableIndex(u16),
#[error("event ring is full")]
EventRingFull,
#[error("event ring cannot read from guest memory: {0}")]
MemoryRead(GuestMemoryError),
#[error("event ring cannot write to guest memory: {0}")]
MemoryWrite(GuestMemoryError),
#[error("event ring is uninitialized")]
Uninitialized,
}
type Result<T> = std::result::Result<T, Error>;
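/// Event rings are segmented circular buffers used to pass event TRBs from the xHCI controller
/// back to the guest; each event ring is associated with a single interrupter (xHCI spec 4.9.4).
///
/// A typical configuration sequence, mirroring the unit test below (the addresses are
/// illustrative only):
///
/// ```ignore
/// let mut er = EventRing::new(guest_memory);
/// er.set_seg_table_size(3)?;
/// er.set_seg_table_base_addr(GuestAddress(0x8))?;
/// er.set_dequeue_pointer(GuestAddress(0x100));
/// er.add_event(trb)?;
/// ```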
pub struct EventRing {
mem: GuestMemory,
segment_table_size: u16,
segment_table_base_address: GuestAddress,
current_segment_index: u16,
trb_count: u16,
enqueue_pointer: GuestAddress,
dequeue_pointer: GuestAddress,
producer_cycle_state: bool,
}
impl EventRing {
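    /// Creates an uninitialized event ring; the segment table size, segment table base address
    /// and dequeue pointer must be configured before events can be added.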
pub fn new(mem: GuestMemory) -> Self {
EventRing {
mem,
segment_table_size: 0,
segment_table_base_address: GuestAddress(0),
current_segment_index: 0,
enqueue_pointer: GuestAddress(0),
dequeue_pointer: GuestAddress(0),
trb_count: 0,
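            // The producer cycle state starts at 1, as required by the xHCI spec.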
producer_cycle_state: true,
}
}
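    /// Writes an event TRB at the current enqueue pointer, publishing the cycle bit last so the
    /// guest never observes a partially written TRB, and advances to the next segment when the
    /// current one is exhausted.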
pub fn add_event(&mut self, mut trb: Trb) -> Result<()> {
self.check_inited()?;
if self.is_full()? {
return Err(Error::EventRingFull);
}
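        // The TRB is written in two steps to avoid racing with the guest: the guest only treats
        // a TRB as valid once its cycle bit matches the consumer cycle state, so first write the
        // whole TRB with the stale cycle bit, then publish the correct cycle bit separately.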
trb.set_cycle(!self.producer_cycle_state);
self.mem
.write_obj_at_addr(trb, self.enqueue_pointer)
.map_err(Error::MemoryWrite)?;
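        // Make sure the TRB body is visible before the cycle bit update below.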
fence(Ordering::SeqCst);
trb.set_cycle(self.producer_cycle_state);
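        // Offset of the dword that holds the cycle bit within the TRB.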
const CYCLE_STATE_OFFSET: usize = 12usize;
let data = trb.as_bytes();
let cycle_bit_dword = &data[CYCLE_STATE_OFFSET..];
let address = self.enqueue_pointer;
let address = address
.checked_add(CYCLE_STATE_OFFSET as u64)
.ok_or(Error::BadEnqueuePointer(self.enqueue_pointer))?;
self.mem
.write_all_at_addr(cycle_bit_dword, address)
.map_err(Error::MemoryWrite)?;
xhci_trace!(
"event write to pointer {:#x}, trb_count {}, {}",
self.enqueue_pointer.0,
self.trb_count,
trb
);
self.enqueue_pointer = match self.enqueue_pointer.checked_add(size_of::<Trb>() as u64) {
Some(addr) => addr,
None => return Err(Error::BadEnqueuePointer(self.enqueue_pointer)),
};
self.trb_count -= 1;
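        // When the current segment is exhausted, move to the next segment table entry; wrapping
        // back to the first entry toggles the producer cycle state.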
if self.trb_count == 0 {
self.current_segment_index += 1;
if self.current_segment_index == self.segment_table_size {
self.producer_cycle_state ^= true;
self.current_segment_index = 0;
}
self.load_current_seg_table_entry()?;
}
Ok(())
}
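    /// Sets the event ring segment table size (ERSTSZ).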
pub fn set_seg_table_size(&mut self, size: u16) -> Result<()> {
xhci_trace!("set_seg_table_size({:#x})", size);
self.segment_table_size = size;
self.try_reconfigure_event_ring()
}
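    /// Sets the event ring segment table base address (ERSTBA).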
pub fn set_seg_table_base_addr(&mut self, addr: GuestAddress) -> Result<()> {
xhci_trace!("set_seg_table_base_addr({:#x})", addr.0);
self.segment_table_base_address = addr;
self.try_reconfigure_event_ring()
}
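    /// Sets the event ring dequeue pointer (ERDP), written by the guest as it consumes events.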
pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
xhci_trace!("set_dequeue_pointer({:#x})", addr.0);
self.dequeue_pointer = addr;
}
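    /// Returns true when the ring has no pending events (enqueue pointer == dequeue pointer).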
pub fn is_empty(&self) -> bool {
self.enqueue_pointer == self.dequeue_pointer
}
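    /// The ring is treated as full when only one free slot remains: writing into that slot would
    /// make the enqueue pointer catch up with the dequeue pointer, which would be
    /// indistinguishable from an empty ring.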
pub fn is_full(&self) -> Result<bool> {
if self.trb_count == 1 {
let next_erst_idx = (self.current_segment_index + 1) % self.segment_table_size;
let erst_entry = self.read_seg_table_entry(next_erst_idx)?;
Ok(self.dequeue_pointer.0 == erst_entry.get_ring_segment_base_address())
} else {
Ok(self.dequeue_pointer.0 == self.enqueue_pointer.0 + size_of::<Trb>() as u64)
}
}
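    // Reloads the enqueue pointer and TRB count from the current segment table entry once both
    // the segment table size and base address have been programmed; a no-op until then.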
fn try_reconfigure_event_ring(&mut self) -> Result<()> {
if self.segment_table_size == 0 || self.segment_table_base_address.0 == 0 {
return Ok(());
}
self.load_current_seg_table_entry()
}
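    // The ring is usable only after the segment table has been programmed and the enqueue
    // pointer has been loaded from it.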
fn check_inited(&self) -> Result<()> {
if self.segment_table_size == 0
|| self.segment_table_base_address == GuestAddress(0)
|| self.enqueue_pointer == GuestAddress(0)
{
return Err(Error::Uninitialized);
}
Ok(())
}
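    // Loads the enqueue pointer and remaining TRB count from the current segment table entry.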
fn load_current_seg_table_entry(&mut self) -> Result<()> {
let entry = self.read_seg_table_entry(self.current_segment_index)?;
self.enqueue_pointer = GuestAddress(entry.get_ring_segment_base_address());
self.trb_count = entry.get_ring_segment_size();
Ok(())
}
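    // Reads the segment table entry at `index` from guest memory.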
fn read_seg_table_entry(&self, index: u16) -> Result<EventRingSegmentTableEntry> {
let seg_table_addr = self.get_seg_table_addr(index)?;
self.mem
.read_obj_from_addr(seg_table_addr)
.map_err(Error::MemoryRead)
}
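    // Computes the guest address of the segment table entry at `index`.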
fn get_seg_table_addr(&self, index: u16) -> Result<GuestAddress> {
        if index >= self.segment_table_size {
return Err(Error::BadSegTableIndex(index));
}
self.segment_table_base_address
            .checked_add(size_of::<EventRingSegmentTableEntry>() as u64 * index as u64)
.ok_or(Error::BadSegTableAddress(self.segment_table_base_address))
}
}
#[cfg(test)]
mod test {
use std::mem::size_of;
use base::pagesize;
use super::*;
#[test]
fn test_uninited() {
let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
let mut er = EventRing::new(gm);
let trb = Trb::new();
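        // Adding an event before the ring has been configured must fail.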
match er.add_event(trb).err().unwrap() {
Error::Uninitialized => {}
_ => panic!("unexpected error"),
}
assert_eq!(er.is_empty(), true);
assert_eq!(er.is_full().unwrap(), false);
}
#[test]
fn test_event_ring() {
let trb_size = size_of::<Trb>() as u64;
let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
let mut er = EventRing::new(gm.clone());
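        // Build a segment table with three segments of three TRBs each, at 0x100, 0x200 and
        // 0x300.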
let mut st_entries = [EventRingSegmentTableEntry::new(); 3];
st_entries[0].set_ring_segment_base_address(0x100);
st_entries[0].set_ring_segment_size(3);
st_entries[1].set_ring_segment_base_address(0x200);
st_entries[1].set_ring_segment_size(3);
st_entries[2].set_ring_segment_base_address(0x300);
st_entries[2].set_ring_segment_size(3);
gm.write_obj_at_addr(st_entries[0], GuestAddress(0x8))
.unwrap();
gm.write_obj_at_addr(
st_entries[1],
GuestAddress(0x8 + size_of::<EventRingSegmentTableEntry>() as u64),
)
.unwrap();
gm.write_obj_at_addr(
st_entries[2],
GuestAddress(0x8 + 2 * size_of::<EventRingSegmentTableEntry>() as u64),
)
.unwrap();
er.set_seg_table_size(3).unwrap();
er.set_seg_table_base_addr(GuestAddress(0x8)).unwrap();
er.set_dequeue_pointer(GuestAddress(0x100));
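        // Fill the first segment; the producer cycle state is still 1.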
let mut trb = Trb::new();
trb.set_control(1);
assert_eq!(er.is_empty(), true);
assert_eq!(er.is_full().unwrap(), false);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
assert_eq!(t.get_control(), 1);
assert_eq!(t.get_cycle(), true);
trb.set_control(2);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
let t: Trb = gm
.read_obj_from_addr(GuestAddress(0x100 + trb_size))
.unwrap();
assert_eq!(t.get_control(), 2);
assert_eq!(t.get_cycle(), true);
trb.set_control(3);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
let t: Trb = gm
.read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
.unwrap();
assert_eq!(t.get_control(), 3);
assert_eq!(t.get_cycle(), true);
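        // TRB 4 lands at the start of the second segment.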
trb.set_control(4);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
let t: Trb = gm.read_obj_from_addr(GuestAddress(0x200)).unwrap();
assert_eq!(t.get_control(), 4);
assert_eq!(t.get_cycle(), true);
trb.set_control(5);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
let t: Trb = gm
.read_obj_from_addr(GuestAddress(0x200 + trb_size))
.unwrap();
assert_eq!(t.get_control(), 5);
assert_eq!(t.get_cycle(), true);
trb.set_control(6);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
let t: Trb = gm
.read_obj_from_addr(GuestAddress(0x200 + 2 * trb_size))
.unwrap();
assert_eq!(t.get_control(), 6);
assert_eq!(t.get_cycle(), true);
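        // TRB 7 lands at the start of the third segment.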
trb.set_control(7);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
let t: Trb = gm.read_obj_from_addr(GuestAddress(0x300)).unwrap();
assert_eq!(t.get_control(), 7);
assert_eq!(t.get_cycle(), true);
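        // TRB 8 leaves only the reserved last slot, so the ring reports full.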
trb.set_control(8);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), true);
assert_eq!(er.is_empty(), false);
let t: Trb = gm
.read_obj_from_addr(GuestAddress(0x300 + trb_size))
.unwrap();
assert_eq!(t.get_control(), 8);
assert_eq!(t.get_cycle(), true);
match er.add_event(trb) {
Err(Error::EventRingFull) => {}
_ => panic!("er should be full"),
};
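        // Advance the dequeue pointer by one TRB to make room; TRB 9 then fills the last slot,
        // wraps the ring back to the first segment and toggles the producer cycle state.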
er.set_dequeue_pointer(GuestAddress(0x100 + trb_size));
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
trb.set_control(9);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), true);
assert_eq!(er.is_empty(), false);
let t: Trb = gm
.read_obj_from_addr(GuestAddress(0x300 + trb_size))
.unwrap();
assert_eq!(t.get_control(), 8);
assert_eq!(t.get_cycle(), true);
match er.add_event(trb) {
Err(Error::EventRingFull) => {}
_ => panic!("er should be full"),
};
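        // Move the dequeue pointer back to the ring start: the ring is fully drained.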
er.set_dequeue_pointer(GuestAddress(0x100));
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), true);
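        // TRBs written after the wrap carry the toggled cycle bit (0).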
trb.set_control(10);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
assert_eq!(t.get_control(), 10);
assert_eq!(t.get_cycle(), false);
trb.set_control(11);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
let t: Trb = gm
.read_obj_from_addr(GuestAddress(0x100 + trb_size))
.unwrap();
assert_eq!(t.get_control(), 11);
assert_eq!(t.get_cycle(), false);
trb.set_control(12);
assert!(er.add_event(trb).is_ok());
assert_eq!(er.is_full().unwrap(), false);
assert_eq!(er.is_empty(), false);
let t: Trb = gm
.read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
.unwrap();
assert_eq!(t.get_control(), 12);
assert_eq!(t.get_cycle(), false);
}
}