use std::convert::TryInto;
use anyhow::Context;
use base::error;
use base::info;
use base::AsRawDescriptor;
use base::Error as SysError;
use base::Event;
use base::RawDescriptor;
use base::Tube;
use base::TubeError;
use bit_field::*;
use remain::sorted;
use serde::Deserialize;
use serde::Serialize;
use thiserror::Error;
use vm_control::VmIrqRequest;
use vm_control::VmIrqResponse;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::FromZeroes;
use crate::pci::pci_configuration::PciCapConfig;
use crate::pci::pci_configuration::PciCapConfigWriteResult;
use crate::pci::PciCapability;
use crate::pci::PciCapabilityID;
// Upper bound on vectors per device (the PCI spec allows at most 2048).
const MAX_MSIX_VECTORS_PER_DEVICE: u16 = 2048;
/// Size in bytes of one MSI-X table entry.
pub const MSIX_TABLE_ENTRIES_MODULO: u64 = 16;
/// Size in bytes of one Pending Bit Array word as seen by BAR accesses.
pub const MSIX_PBA_ENTRIES_MODULO: u64 = 8;
/// Number of pending bits packed into each PBA entry (u64).
pub const BITS_PER_PBA_ENTRY: usize = 64;
// Function mask bit in the MSI-X message control word.
const FUNCTION_MASK_BIT: u16 = 0x4000;
// MSI-X enable bit in the MSI-X message control word.
const MSIX_ENABLE_BIT: u16 = 0x8000;
// Per-vector mask bit within a table entry's vector control dword.
const MSIX_TABLE_ENTRY_MASK_BIT: u32 = 0x1;
/// In-memory image of one 16-byte MSI-X table entry: message address
/// (low/high), message data, and vector control.
#[derive(Serialize, Deserialize, Clone, Default)]
struct MsixTableEntry {
    // Low 32 bits of the message address.
    msg_addr_lo: u32,
    // High 32 bits of the message address.
    msg_addr_hi: u32,
    // Message data written when the interrupt fires.
    msg_data: u32,
    // Vector control dword; bit 0 is the per-vector mask.
    vector_ctl: u32,
}
impl MsixTableEntry {
    /// Returns true when the per-vector mask bit of Vector Control is set.
    fn masked(&self) -> bool {
        // MSIX_TABLE_ENTRY_MASK_BIT is a single bit, so a non-zero test is
        // equivalent to comparing against the bit itself.
        self.vector_ctl & MSIX_TABLE_ENTRY_MASK_BIT != 0
    }
}
/// An allocated irqfd paired with the GSI it is registered at.
struct IrqfdGsi {
    // Event signaled to deliver the interrupt.
    irqfd: Event,
    // Global System Interrupt number the irqfd is routed to.
    gsi: u32,
}
/// Emulated MSI-X capability, table, and Pending Bit Array state for one
/// PCI device.
pub struct MsixConfig {
    // One entry per vector, programmed by the guest through BAR writes.
    table_entries: Vec<MsixTableEntry>,
    // Pending Bit Array: one bit per vector, 64 packed per u64 word.
    pba_entries: Vec<u64>,
    // Per-vector irqfd/GSI pair; None until the vector is enabled.
    irq_vec: Vec<Option<IrqfdGsi>>,
    // Function mask bit from the message control word.
    masked: bool,
    // MSI-X enable bit from the message control word.
    enabled: bool,
    // Tube used to allocate/release irqfds and install MSI routes.
    msi_device_socket: Tube,
    // Number of vectors this device supports.
    msix_num: u16,
    // Device id passed along with irqfd allocation requests.
    pci_id: u32,
    // Device name passed along with irqfd allocation requests.
    device_name: String,
}
/// Serializable snapshot of [`MsixConfig`]. Irqfds cannot be serialized,
/// so only their GSI numbers are recorded (`irq_gsi_vec`); restore
/// re-allocates irqfds at the same GSIs.
#[derive(Serialize, Deserialize)]
struct MsixConfigSnapshot {
    table_entries: Vec<MsixTableEntry>,
    pba_entries: Vec<u64>,
    // GSI per vector, or None if the vector had no irqfd allocated.
    irq_gsi_vec: Vec<Option<u32>>,
    masked: bool,
    enabled: bool,
    msix_num: u16,
    pci_id: u32,
    device_name: String,
}
/// Errors raised while configuring MSI-X vectors over the VM IRQ tube.
#[sorted]
#[derive(Error, Debug)]
pub enum MsixError {
    #[error("AddMsiRoute failed: {0}")]
    AddMsiRoute(SysError),
    #[error("failed to receive AddMsiRoute response: {0}")]
    AddMsiRouteRecv(TubeError),
    #[error("failed to send AddMsiRoute request: {0}")]
    AddMsiRouteSend(TubeError),
    #[error("AllocateOneMsi failed: {0}")]
    AllocateOneMsi(SysError),
    #[error("failed to receive AllocateOneMsi response: {0}")]
    AllocateOneMsiRecv(TubeError),
    #[error("failed to send AllocateOneMsi request: {0}")]
    AllocateOneMsiSend(TubeError),
    #[error("failed to deserialize snapshot: {0}")]
    DeserializationFailed(serde_json::Error),
    #[error("invalid vector length in snapshot: {0}")]
    InvalidVectorLength(std::num::TryFromIntError),
    // Consistency: use the `SysError` alias like every other variant
    // (`base::Error` is the same type, imported as `SysError` above).
    #[error("ReleaseOneIrq failed: {0}")]
    ReleaseOneIrq(SysError),
    #[error("failed to receive ReleaseOneIrq response: {0}")]
    ReleaseOneIrqRecv(TubeError),
    #[error("failed to send ReleaseOneIrq request: {0}")]
    ReleaseOneIrqSend(TubeError),
}
type MsixResult<T> = std::result::Result<T, MsixError>;
/// Outcome of a guest write to MSI-X state, reported so callers can react
/// to mask/enable transitions.
#[derive(Copy, Clone)]
pub enum MsixStatus {
    // Function-level mask or enable state changed.
    Changed,
    // A single table entry's state changed (carries the vector index).
    EntryChanged(usize),
    // The write caused no observable transition.
    NothingToDo,
}
impl PciCapConfigWriteResult for MsixStatus {}
impl MsixConfig {
pub fn new(msix_vectors: u16, vm_socket: Tube, pci_id: u32, device_name: String) -> Self {
assert!(msix_vectors <= MAX_MSIX_VECTORS_PER_DEVICE);
let mut table_entries: Vec<MsixTableEntry> = Vec::new();
table_entries.resize_with(msix_vectors as usize, Default::default);
table_entries
.iter_mut()
.for_each(|entry| entry.vector_ctl |= MSIX_TABLE_ENTRY_MASK_BIT);
let mut pba_entries: Vec<u64> = Vec::new();
let num_pba_entries: usize =
((msix_vectors as usize) + BITS_PER_PBA_ENTRY - 1) / BITS_PER_PBA_ENTRY;
pba_entries.resize_with(num_pba_entries, Default::default);
let mut irq_vec = Vec::new();
irq_vec.resize_with(msix_vectors.into(), || None::<IrqfdGsi>);
MsixConfig {
table_entries,
pba_entries,
irq_vec,
masked: false,
enabled: false,
msi_device_socket: vm_socket,
msix_num: msix_vectors,
pci_id,
device_name,
}
}
/// Number of vectors in the MSI-X table.
pub fn num_vectors(&self) -> u16 {
    self.msix_num
}

/// Whether the MSI-X function mask bit is set.
pub fn masked(&self) -> bool {
    self.masked
}

/// Whether the per-vector mask bit is set for `index`. Out-of-range
/// vectors are reported as masked.
pub fn table_masked(&self, index: usize) -> bool {
    self.table_entries.get(index).map_or(true, |e| e.masked())
}

/// Whether the MSI-X enable bit is set.
pub fn enabled(&self) -> bool {
    self.enabled
}
/// Returns the capability dword with the emulated enable and function-mask
/// bits overlaid onto the caller-provided value `data`.
pub fn read_msix_capability(&self, data: u32) -> u32 {
    // Clear both emulated bits in the incoming message control word, then
    // set them from our state.
    let mut msg_ctl = (data >> 16) as u16 & !(MSIX_ENABLE_BIT | FUNCTION_MASK_BIT);
    if self.enabled {
        msg_ctl |= MSIX_ENABLE_BIT;
    }
    if self.masked {
        msg_ctl |= FUNCTION_MASK_BIT;
    }
    // Recombine message control (upper 16 bits) with the untouched lower
    // half of the dword.
    ((msg_ctl as u32) << 16) | (data & u16::MAX as u32)
}
/// Handles a guest write to the MSI-X capability's message control word.
///
/// Only 2-byte writes at offset 2 (message control) are accepted; anything
/// else logs an error. On disable -> enable, irqfds are allocated for all
/// deliverable vectors; on function unmask, interrupts latched in the PBA
/// for unmasked vectors are delivered.
pub fn write_msix_capability(&mut self, offset: u64, data: &[u8]) -> MsixStatus {
    if offset == 2 && data.len() == 2 {
        let reg = u16::from_le_bytes([data[0], data[1]]);
        let old_masked = self.masked;
        let old_enabled = self.enabled;

        self.masked = (reg & FUNCTION_MASK_BIT) == FUNCTION_MASK_BIT;
        self.enabled = (reg & MSIX_ENABLE_BIT) == MSIX_ENABLE_BIT;

        if !old_enabled && self.enabled {
            if let Err(e) = self.msix_enable_all() {
                error!("failed to enable MSI-X: {}", e);
                self.enabled = false;
            }
        }

        if old_masked && !self.masked {
            // Collect the pending vector indices first instead of cloning
            // the whole table just to satisfy the borrow checker; injection
            // only clears each vector's own PBA bit, so evaluating all
            // predicates up front is equivalent.
            let pending: Vec<usize> = (0..self.table_entries.len())
                .filter(|&index| {
                    !self.table_entries[index].masked() && self.get_pba_bit(index as u16) == 1
                })
                .collect();
            for index in pending {
                self.inject_msix_and_clear_pba(index);
            }
            return MsixStatus::Changed;
        } else if !old_masked && self.masked {
            return MsixStatus::Changed;
        }
    } else {
        error!(
            "invalid write to MSI-X Capability Structure offset {:x}",
            offset
        );
    }
    MsixStatus::NothingToDo
}
/// Serializes the MSI-X state to a JSON value for later [`MsixConfig::restore`].
pub fn snapshot(&mut self) -> anyhow::Result<serde_json::Value> {
    // Irqfds cannot be serialized; record only the GSI numbers so restore
    // can re-allocate irqfds at the same GSIs.
    let irq_gsi_vec: Vec<Option<u32>> = self
        .irq_vec
        .iter()
        .map(|irq_opt| irq_opt.as_ref().map(|irq| irq.gsi))
        .collect();
    let state = MsixConfigSnapshot {
        table_entries: self.table_entries.clone(),
        pba_entries: self.pba_entries.clone(),
        irq_gsi_vec,
        masked: self.masked,
        enabled: self.enabled,
        msix_num: self.msix_num,
        pci_id: self.pci_id,
        device_name: self.device_name.clone(),
    };
    serde_json::to_value(state).context("failed to serialize MsixConfigSnapshot")
}
/// Restores state from a value produced by [`MsixConfig::snapshot`].
///
/// All currently held irqfds are released first, then a fresh irqfd is
/// allocated at the snapshotted GSI for every vector that had one, and its
/// MSI route re-installed. The release-before-allocate order matters so
/// the GSIs are free for reuse.
pub fn restore(&mut self, snapshot: serde_json::Value) -> MsixResult<()> {
    let snapshot: MsixConfigSnapshot =
        serde_json::from_value(snapshot).map_err(MsixError::DeserializationFailed)?;
    self.table_entries = snapshot.table_entries;
    self.pba_entries = snapshot.pba_entries;
    self.masked = snapshot.masked;
    self.enabled = snapshot.enabled;
    self.msix_num = snapshot.msix_num;
    self.pci_id = snapshot.pci_id;
    self.device_name = snapshot.device_name;
    // Drop irqfds from before the restore, then size irq_vec to match the
    // snapshot before re-allocating per vector.
    self.msix_release_all()?;
    self.irq_vec
        .resize_with(snapshot.irq_gsi_vec.len(), || None::<IrqfdGsi>);
    for (vector, gsi) in snapshot.irq_gsi_vec.iter().enumerate() {
        if let Some(gsi_num) = gsi {
            self.msix_restore_one(vector, *gsi_num)?;
        } else {
            info!(
                "skipping restore of vector {} for device {}",
                vector, self.device_name
            );
        }
    }
    Ok(())
}

/// Allocates an irqfd bound to the specific GSI `gsi` for vector `index`
/// and installs its MSI route. Only used on the restore path.
fn msix_restore_one(&mut self, index: usize, gsi: u32) -> MsixResult<()> {
    let irqfd = Event::new().map_err(MsixError::AllocateOneMsi)?;
    // The irqfd is moved into the request for sending; it is recovered
    // from `request` below once the response has arrived.
    let request = VmIrqRequest::AllocateOneMsiAtGsi {
        irqfd,
        gsi,
        device_id: self.pci_id,
        queue_id: index,
        device_name: self.device_name.clone(),
    };
    self.msi_device_socket
        .send(&request)
        .map_err(MsixError::AllocateOneMsiSend)?;
    if let VmIrqResponse::Err(e) = self
        .msi_device_socket
        .recv()
        .map_err(MsixError::AllocateOneMsiRecv)?
    {
        return Err(MsixError::AllocateOneMsi(e));
    };
    self.irq_vec[index] = Some(IrqfdGsi {
        irqfd: match request {
            VmIrqRequest::AllocateOneMsiAtGsi { irqfd, .. } => irqfd,
            _ => unreachable!(),
        },
        gsi,
    });
    self.add_msi_route(index as u16, gsi)?;
    Ok(())
}

/// Releases every allocated irqfd/GSI pair (ascending vector order),
/// leaving `irq_vec` empty.
fn msix_release_all(&mut self) -> MsixResult<()> {
    for irqfd_gsi in self.irq_vec.drain(..).flatten() {
        let request = VmIrqRequest::ReleaseOneIrq {
            gsi: irqfd_gsi.gsi,
            irqfd: irqfd_gsi.irqfd,
        };
        self.msi_device_socket
            .send(&request)
            .map_err(MsixError::ReleaseOneIrqSend)?;
        if let VmIrqResponse::Err(e) = self
            .msi_device_socket
            .recv()
            .map_err(MsixError::ReleaseOneIrqRecv)?
        {
            return Err(MsixError::ReleaseOneIrq(e));
        }
    }
    Ok(())
}

/// Installs (or refreshes) the GSI -> MSI address/data route for vector
/// `index`, reading the address and data from the vector's table entry.
///
/// A zero message address is treated as "not yet programmed" and the route
/// request is skipped.
fn add_msi_route(&mut self, index: u16, gsi: u32) -> MsixResult<()> {
    let mut data: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
    // Bytes 0..8 of the entry: 64-bit message address.
    self.read_msix_table((index * 16).into(), data.as_mut());
    let msi_address: u64 = u64::from_le_bytes(data);
    let mut data: [u8; 4] = [0, 0, 0, 0];
    // Bytes 8..12 of the entry: message data.
    self.read_msix_table((index * 16 + 8).into(), data.as_mut());
    let msi_data: u32 = u32::from_le_bytes(data);
    if msi_address == 0 {
        return Ok(());
    }
    self.msi_device_socket
        .send(&VmIrqRequest::AddMsiRoute {
            gsi,
            msi_address,
            msi_data,
        })
        .map_err(MsixError::AddMsiRouteSend)?;
    if let VmIrqResponse::Err(e) = self
        .msi_device_socket
        .recv()
        .map_err(MsixError::AddMsiRouteRecv)?
    {
        return Err(MsixError::AddMsiRoute(e));
    }
    Ok(())
}
/// Tries to allocate an irqfd/route for every vector, stopping at the
/// first failure. Indices are used because `msix_enable_one` needs
/// `&mut self`.
fn msix_enable_all(&mut self) -> MsixResult<()> {
    (0..self.irq_vec.len()).try_for_each(|index| self.msix_enable_one(index))
}
/// Allocates an irqfd/GSI pair for vector `index` and installs its MSI
/// route, if the vector is currently deliverable.
///
/// No-op when the vector already has an irqfd, or when MSI-X is disabled,
/// function-masked, or the vector itself is masked.
fn msix_enable_one(&mut self, index: usize) -> MsixResult<()> {
    if self.irq_vec[index].is_some()
        || !self.enabled()
        || self.masked()
        || self.table_masked(index)
    {
        return Ok(());
    }
    let irqfd = Event::new().map_err(MsixError::AllocateOneMsi)?;
    // The irqfd is moved into the request for sending; it is recovered
    // from `request` below after the response has been received.
    let request = VmIrqRequest::AllocateOneMsi {
        irqfd,
        device_id: self.pci_id,
        queue_id: index,
        device_name: self.device_name.clone(),
    };
    self.msi_device_socket
        .send(&request)
        .map_err(MsixError::AllocateOneMsiSend)?;
    // Unlike the restore path, the GSI is chosen by the other end and
    // returned in the response.
    let irq_num: u32 = match self
        .msi_device_socket
        .recv()
        .map_err(MsixError::AllocateOneMsiRecv)?
    {
        VmIrqResponse::AllocateOneMsi { gsi } => gsi,
        VmIrqResponse::Err(e) => return Err(MsixError::AllocateOneMsi(e)),
        _ => unreachable!(),
    };
    self.irq_vec[index] = Some(IrqfdGsi {
        irqfd: match request {
            VmIrqRequest::AllocateOneMsi { irqfd, .. } => irqfd,
            _ => unreachable!(),
        },
        gsi: irq_num,
    });
    self.add_msi_route(index as u16, irq_num)?;
    Ok(())
}
/// Reads from the MSI-X table at byte `offset` into `data`.
///
/// Only 4- and 8-byte accesses at dword-aligned offsets within a 16-byte
/// entry are supported; anything else logs an error and reads as zero.
/// Panics if `offset` addresses an entry beyond the table.
pub fn read_msix_table(&self, offset: u64, data: &mut [u8]) {
    let index: usize = (offset / MSIX_TABLE_ENTRIES_MODULO) as usize;
    let modulo_offset = offset % MSIX_TABLE_ENTRIES_MODULO;
    match data.len() {
        4 => {
            let value = match modulo_offset {
                0x0 => self.table_entries[index].msg_addr_lo,
                0x4 => self.table_entries[index].msg_addr_hi,
                0x8 => self.table_entries[index].msg_data,
                0xc => self.table_entries[index].vector_ctl,
                _ => {
                    error!("invalid offset");
                    0
                }
            };
            data.copy_from_slice(&value.to_le_bytes());
        }
        8 => {
            let value = match modulo_offset {
                // Full 64-bit message address (low dword first).
                0x0 => {
                    (u64::from(self.table_entries[index].msg_addr_hi) << 32)
                        | u64::from(self.table_entries[index].msg_addr_lo)
                }
                // Message data in the low dword, vector control in the high.
                0x8 => {
                    (u64::from(self.table_entries[index].vector_ctl) << 32)
                        | u64::from(self.table_entries[index].msg_data)
                }
                _ => {
                    error!("invalid offset");
                    0
                }
            };
            data.copy_from_slice(&value.to_le_bytes());
        }
        _ => error!("invalid data length"),
    };
}
/// Handles a guest write to the MSI-X table at byte `offset`.
///
/// Updates the addressed entry, then reacts to the transition the write
/// caused: lazily allocates the vector's irqfd when it first becomes
/// deliverable, refreshes the MSI route when address/data changed while
/// enabled, and delivers an interrupt latched in the PBA when the entry
/// becomes unmasked. Panics if `offset` addresses an entry beyond the
/// table.
pub fn write_msix_table(&mut self, offset: u64, data: &[u8]) -> MsixStatus {
    let index: usize = (offset / MSIX_TABLE_ENTRIES_MODULO) as usize;
    let modulo_offset = offset % MSIX_TABLE_ENTRIES_MODULO;
    // Snapshot the entry before the write so transitions can be detected.
    let old_entry = self.table_entries[index].clone();
    match data.len() {
        4 => {
            let value = u32::from_le_bytes(data.try_into().unwrap());
            match modulo_offset {
                0x0 => self.table_entries[index].msg_addr_lo = value,
                0x4 => self.table_entries[index].msg_addr_hi = value,
                0x8 => self.table_entries[index].msg_data = value,
                0xc => self.table_entries[index].vector_ctl = value,
                _ => error!("invalid offset"),
            };
        }
        8 => {
            let value = u64::from_le_bytes(data.try_into().unwrap());
            match modulo_offset {
                // 64-bit write covering the whole message address.
                0x0 => {
                    self.table_entries[index].msg_addr_lo = (value & 0xffff_ffffu64) as u32;
                    self.table_entries[index].msg_addr_hi = (value >> 32) as u32;
                }
                // 64-bit write covering message data and vector control.
                0x8 => {
                    self.table_entries[index].msg_data = (value & 0xffff_ffffu64) as u32;
                    self.table_entries[index].vector_ctl = (value >> 32) as u32;
                }
                _ => error!("invalid offset"),
            };
        }
        _ => error!("invalid data length"),
    };
    let new_entry = self.table_entries[index].clone();
    // First unmask while enabled and no irqfd allocated yet: allocate it
    // now. On failure, re-mask the vector so guest-visible state stays
    // consistent with what can actually be delivered.
    if self.enabled()
        && !self.masked()
        && self.irq_vec[index].is_none()
        && old_entry.masked()
        && !new_entry.masked()
    {
        if let Err(e) = self.msix_enable_one(index) {
            error!("failed to enable MSI-X vector {}: {}", index, e);
            self.table_entries[index].vector_ctl |= MSIX_TABLE_ENTRY_MASK_BIT;
        }
        return MsixStatus::EntryChanged(index);
    }
    // Address or data changed while enabled: refresh the kernel MSI route.
    if self.enabled()
        && (old_entry.msg_addr_lo != new_entry.msg_addr_lo
            || old_entry.msg_addr_hi != new_entry.msg_addr_hi
            || old_entry.msg_data != new_entry.msg_data)
    {
        if let Some(irqfd_gsi) = &self.irq_vec[index] {
            let irq_num = irqfd_gsi.gsi;
            if let Err(e) = self.add_msi_route(index as u16, irq_num) {
                error!("add_msi_route failed: {}", e);
            }
        }
    }
    // Per-entry mask transitions while the function is unmasked: deliver a
    // pending interrupt on unmask; report either direction of transition.
    if !self.masked() {
        if old_entry.masked() && !self.table_entries[index].masked() {
            if self.get_pba_bit(index as u16) == 1 {
                self.inject_msix_and_clear_pba(index);
            }
            return MsixStatus::EntryChanged(index);
        } else if !old_entry.masked() && self.table_entries[index].masked() {
            return MsixStatus::EntryChanged(index);
        }
    }
    MsixStatus::NothingToDo
}
/// Reads from the Pending Bit Array at byte `offset` into `data`.
///
/// Supports 4-byte reads of either half of a PBA word and aligned 8-byte
/// reads of a whole word; other accesses log an error and read as zero.
/// Panics if `offset` addresses a word beyond the PBA.
pub fn read_pba_entries(&self, offset: u64, data: &mut [u8]) {
    let index: usize = (offset / MSIX_PBA_ENTRIES_MODULO) as usize;
    let modulo_offset = offset % MSIX_PBA_ENTRIES_MODULO;
    match data.len() {
        4 => {
            let value: u32 = match modulo_offset {
                0x0 => (self.pba_entries[index] & 0xffff_ffffu64) as u32,
                0x4 => (self.pba_entries[index] >> 32) as u32,
                _ => {
                    error!("invalid offset");
                    0
                }
            };
            data.copy_from_slice(&value.to_le_bytes());
        }
        8 => {
            let value: u64 = match modulo_offset {
                0x0 => self.pba_entries[index],
                _ => {
                    error!("invalid offset");
                    0
                }
            };
            data.copy_from_slice(&value.to_le_bytes());
        }
        _ => error!("invalid data length"),
    }
}

/// The PBA is read-only from the guest; all writes are rejected with an
/// error log and otherwise ignored.
pub fn write_pba_entries(&mut self, _offset: u64, _data: &[u8]) {
    error!("Pending Bit Array is read only");
}
/// Sets or clears the pending bit for `vector` in the PBA.
///
/// Panics if `vector` exceeds the per-device maximum or if the PBA is too
/// small for the vector's word.
fn set_pba_bit(&mut self, vector: u16, set: bool) {
    assert!(vector < MAX_MSIX_VECTORS_PER_DEVICE);
    let index: usize = (vector as usize) / BITS_PER_PBA_ENTRY;
    let shift: usize = (vector as usize) % BITS_PER_PBA_ENTRY;
    // Bug fix: the former `(1 << shift) as u64` shifted an i32 literal, so
    // any shift >= 31 (vectors 31..63 within a word) overflowed — a panic
    // in debug builds and the wrong mask in release. Shift a u64 instead.
    let mask: u64 = 1u64 << shift;
    if set {
        self.pba_entries[index] |= mask;
    } else {
        self.pba_entries[index] &= !mask;
    }
}
/// Returns the pending bit (0 or 1) for `vector`.
///
/// Panics if `vector` exceeds the per-device maximum or the PBA is too
/// small for the vector's word.
fn get_pba_bit(&self, vector: u16) -> u8 {
    assert!(vector < MAX_MSIX_VECTORS_PER_DEVICE);
    let word = (vector as usize) / BITS_PER_PBA_ENTRY;
    let bit = (vector as usize) % BITS_PER_PBA_ENTRY;
    ((self.pba_entries[word] >> bit) & 1) as u8
}
/// Signals the vector's irqfd (if allocated) and clears its pending bit.
fn inject_msix_and_clear_pba(&mut self, vector: usize) {
    if let Some(irq) = &self.irq_vec[vector] {
        irq.irqfd.signal().unwrap();
    }
    // The pending bit is cleared even when no irqfd exists for the vector.
    self.set_pba_bit(vector as u16, false);
}

/// Asserts the interrupt for `vector`: latched in the PBA when the vector
/// or the whole function is masked, otherwise delivered by signaling the
/// irqfd. Panics if `vector` is outside the table.
pub fn trigger(&mut self, vector: u16) {
    if self.table_entries[vector as usize].masked() || self.masked() {
        self.set_pba_bit(vector, true);
    } else if let Some(irq) = self.irq_vec.get(vector as usize).unwrap_or(&None) {
        irq.irqfd.signal().unwrap();
    }
}

/// Raw descriptor of the MSI tube, e.g. for registering with a poll loop.
pub fn get_msi_socket(&self) -> RawDescriptor {
    self.msi_device_socket.as_raw_descriptor()
}
/// Returns the irqfd backing `vector`, or `None` if the vector is out of
/// range or has no irqfd allocated.
pub fn get_irqfd(&self, vector: usize) -> Option<&Event> {
    // Idiom: `?` + `map` replaces the hand-written
    // `match { Some(x) => Some(..), None => None }`.
    self.irq_vec.get(vector)?.as_ref().map(|irq| &irq.irqfd)
}
/// Releases all irqfd/GSI pairs back over the tube, best effort.
///
/// Vectors are popped from the back, so release requests go out in
/// reverse vector order; send failures are ignored (teardown path).
pub fn destroy(&mut self) {
    while let Some(irq) = self.irq_vec.pop() {
        if let Some(irq) = irq {
            let request = VmIrqRequest::ReleaseOneIrq {
                gsi: irq.gsi,
                irqfd: irq.irqfd,
            };
            if self.msi_device_socket.send(&request).is_err() {
                continue;
            }
            // Response content is irrelevant during teardown.
            let _ = self.msi_device_socket.recv::<VmIrqResponse>();
        }
    }
}
}
const MSIX_CONFIG_READ_MASK: [u32; 3] = [0xc000_0000, 0, 0];
impl PciCapConfig for MsixConfig {
    fn read_mask(&self) -> &'static [u32] {
        &MSIX_CONFIG_READ_MASK
    }

    /// Only register 0 (message control dword) carries live state; the
    /// remaining registers read as zero.
    fn read_reg(&self, reg_idx: usize) -> u32 {
        match reg_idx {
            0 => self.read_msix_capability(0),
            _ => 0,
        }
    }

    /// Forwards writes to register 0 to the capability handler; writes to
    /// other registers are no-ops reported as `NothingToDo`.
    fn write_reg(
        &mut self,
        reg_idx: usize,
        offset: u64,
        data: &[u8],
    ) -> Option<Box<dyn PciCapConfigWriteResult>> {
        let status = match reg_idx {
            0 => self.write_msix_capability(offset, data),
            _ => MsixStatus::NothingToDo,
        };
        Some(Box::new(status))
    }
}
impl AsRawDescriptor for MsixConfig {
    /// Exposes the MSI tube's raw descriptor (same as `get_msi_socket`).
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.msi_device_socket.as_raw_descriptor()
    }
}
/// Bit layout of the MSI-X Message Control register.
#[bitfield]
#[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
pub struct MsixCtrl {
    // Table size, encoded as (number of entries - 1); see MsixCap::new.
    table_size: B10,
    reserved: B4,
    // Function mask bit.
    mask: B1,
    // MSI-X enable bit.
    enable: B1,
}
/// Wire image of the MSI-X capability structure as it appears in PCI
/// config space.
#[allow(dead_code)]
#[repr(C)]
#[derive(Clone, Copy, Default, AsBytes, FromZeroes, FromBytes)]
pub struct MsixCap {
    // Capability ID byte; left as 0 here (presumably filled in by the
    // capability registration machinery — not set in this file).
    _cap_vndr: u8,
    // Next-capability pointer; likewise left as 0 here.
    _cap_next: u8,
    // Message control word (table size, mask, enable).
    msg_ctl: MsixCtrl,
    // Table offset (8-byte aligned) combined with its BAR index (low 3 bits).
    table: u32,
    // PBA offset (8-byte aligned) combined with its BAR index (low 3 bits).
    pba: u32,
}
impl PciCapability for MsixCap {
    fn bytes(&self) -> &[u8] {
        self.as_bytes()
    }

    fn id(&self) -> PciCapabilityID {
        PciCapabilityID::Msix
    }

    // Only a subset of the first dword is guest-writable; the table/PBA
    // location registers are read-only.
    fn writable_bits(&self) -> Vec<u32> {
        vec![0x3000_0000, 0, 0]
    }
}
impl MsixCap {
    /// Builds an MSI-X capability advertising a `table_size`-entry table at
    /// `table_off` in BAR `table_pci_bar` and a PBA at `pba_off` in BAR
    /// `pba_pci_bar`. Offsets must be 8-byte aligned (low 3 bits are
    /// replaced by the BAR index).
    ///
    /// Panics if `table_size` is 0 or reaches the per-device maximum.
    pub fn new(
        table_pci_bar: u8,
        table_size: u16,
        table_off: u32,
        pba_pci_bar: u8,
        pba_off: u32,
    ) -> Self {
        // Robustness: the field below encodes N - 1, so a zero table size
        // would underflow; reject it explicitly instead.
        assert!(table_size > 0, "MSI-X table must have at least one entry");
        assert!(table_size < MAX_MSIX_VECTORS_PER_DEVICE);
        let mut msg_ctl = MsixCtrl::new();
        msg_ctl.set_enable(1);
        // Table size field is encoded as (number of entries - 1).
        msg_ctl.set_table_size(table_size - 1);
        MsixCap {
            _cap_vndr: 0,
            _cap_next: 0,
            msg_ctl,
            table: (table_off & 0xffff_fff8u32) | u32::from(table_pci_bar & 0x7u8),
            pba: (pba_off & 0xffff_fff8u32) | u32::from(pba_pci_bar & 0x7u8),
        }
    }
}
#[cfg(test)]
mod tests {
use std::thread;
use super::*;
/// Receives the next request on the fake irqchip tube, asserts it is
/// `AllocateOneMsiAtGsi`, and returns the requested GSI.
#[track_caller]
fn recv_allocate_msi(t: &Tube) -> u32 {
    match t.recv::<VmIrqRequest>().unwrap() {
        VmIrqRequest::AllocateOneMsiAtGsi { gsi, .. } => gsi,
        msg => panic!("unexpected irqchip message: {:?}", msg),
    }
}

/// Captured fields of an `AddMsiRoute` request.
struct MsiRouteDetails {
    gsi: u32,
    msi_address: u64,
    msi_data: u32,
}

/// Receives the next request, asserts it is `AddMsiRoute`, and returns its
/// routing fields.
#[track_caller]
fn recv_add_msi_route(t: &Tube) -> MsiRouteDetails {
    match t.recv::<VmIrqRequest>().unwrap() {
        VmIrqRequest::AddMsiRoute {
            gsi,
            msi_address,
            msi_data,
        } => MsiRouteDetails {
            gsi,
            msi_address,
            msi_data,
        },
        msg => panic!("unexpected irqchip message: {:?}", msg),
    }
}

/// Receives the next request, asserts it is `ReleaseOneIrq`, and returns
/// the released GSI.
#[track_caller]
fn recv_release_one_irq(t: &Tube) -> u32 {
    match t.recv::<VmIrqRequest>().unwrap() {
        VmIrqRequest::ReleaseOneIrq { gsi, irqfd: _ } => gsi,
        msg => panic!("unexpected irqchip message: {:?}", msg),
    }
}

/// Replies `Ok` to the most recent request.
#[track_caller]
fn send_ok(t: &Tube) {
    t.send(&VmIrqResponse::Ok).unwrap();
}
// "Cold" restore: the snapshot is applied to a freshly created MsixConfig
// (different vector count, pci_id and name, no irqfds of its own), so
// restore only issues allocate + route requests — no releases.
#[test]
fn verify_msix_restore_cold_smoke() {
    let (irqchip_tube, msix_config_tube) = Tube::pair().unwrap();
    let (_unused, unused_config_tube) = Tube::pair().unwrap();
    let mut cfg = MsixConfig::new(2, unused_config_tube, 0, "test_device".to_owned());
    // Program two vectors with distinct address/data so the AddMsiRoute
    // requests can be told apart by the fake irqchip.
    cfg.table_entries[0].msg_data = 0xd0;
    cfg.table_entries[0].msg_addr_lo = 0xa0;
    cfg.table_entries[0].msg_addr_hi = 0;
    cfg.table_entries[1].msg_data = 0xd1;
    cfg.table_entries[1].msg_addr_lo = 0xa1;
    cfg.table_entries[1].msg_addr_hi = 0;
    // Pretend both vectors already have irqfds at GSIs 10 and 20 so the
    // snapshot records those GSIs.
    cfg.irq_vec = vec![
        Some(IrqfdGsi {
            gsi: 10,
            irqfd: Event::new().unwrap(),
        }),
        Some(IrqfdGsi {
            gsi: 20,
            irqfd: Event::new().unwrap(),
        }),
    ];
    let snapshot = cfg.snapshot().unwrap();
    // Fake irqchip: expect allocate-at-GSI + route per vector, in order.
    let irqchip_fake = thread::spawn(move || {
        assert_eq!(recv_allocate_msi(&irqchip_tube), 10);
        send_ok(&irqchip_tube);
        let route_one = recv_add_msi_route(&irqchip_tube);
        assert_eq!(route_one.gsi, 10);
        assert_eq!(route_one.msi_address, 0xa0);
        assert_eq!(route_one.msi_data, 0xd0);
        send_ok(&irqchip_tube);
        assert_eq!(recv_allocate_msi(&irqchip_tube), 20);
        send_ok(&irqchip_tube);
        let route_two = recv_add_msi_route(&irqchip_tube);
        assert_eq!(route_two.gsi, 20);
        assert_eq!(route_two.msi_address, 0xa1);
        assert_eq!(route_two.msi_data, 0xd1);
        send_ok(&irqchip_tube);
        irqchip_tube
    });
    let mut restored_cfg = MsixConfig::new(10, msix_config_tube, 10, "some_device".to_owned());
    restored_cfg.restore(snapshot).unwrap();
    irqchip_fake.join().unwrap();
    // Snapshot values override the target's own construction parameters.
    assert_eq!(restored_cfg.pci_id, 0);
    assert_eq!(restored_cfg.device_name, "test_device");
}

// "Warm" restore: the snapshot is applied back onto the same MsixConfig,
// so restore first releases the existing GSIs (10 then 20) and only then
// re-allocates and re-routes them.
#[test]
fn verify_msix_restore_warm_smoke() {
    let (irqchip_tube, msix_config_tube) = Tube::pair().unwrap();
    let mut cfg = MsixConfig::new(2, msix_config_tube, 0, "test_device".to_owned());
    cfg.table_entries[0].msg_data = 0xd0;
    cfg.table_entries[0].msg_addr_lo = 0xa0;
    cfg.table_entries[0].msg_addr_hi = 0;
    cfg.table_entries[1].msg_data = 0xd1;
    cfg.table_entries[1].msg_addr_lo = 0xa1;
    cfg.table_entries[1].msg_addr_hi = 0;
    cfg.irq_vec = vec![
        Some(IrqfdGsi {
            gsi: 10,
            irqfd: Event::new().unwrap(),
        }),
        Some(IrqfdGsi {
            gsi: 20,
            irqfd: Event::new().unwrap(),
        }),
    ];
    let snapshot = cfg.snapshot().unwrap();
    let irqchip_fake = thread::spawn(move || {
        // Release of the pre-existing irqfds comes first on the warm path.
        assert_eq!(recv_release_one_irq(&irqchip_tube), 10);
        send_ok(&irqchip_tube);
        assert_eq!(recv_release_one_irq(&irqchip_tube), 20);
        send_ok(&irqchip_tube);
        assert_eq!(recv_allocate_msi(&irqchip_tube), 10);
        send_ok(&irqchip_tube);
        let route_one = recv_add_msi_route(&irqchip_tube);
        assert_eq!(route_one.gsi, 10);
        assert_eq!(route_one.msi_address, 0xa0);
        assert_eq!(route_one.msi_data, 0xd0);
        send_ok(&irqchip_tube);
        assert_eq!(recv_allocate_msi(&irqchip_tube), 20);
        send_ok(&irqchip_tube);
        let route_two = recv_add_msi_route(&irqchip_tube);
        assert_eq!(route_two.gsi, 20);
        assert_eq!(route_two.msi_address, 0xa1);
        assert_eq!(route_two.msi_data, 0xd1);
        send_ok(&irqchip_tube);
        irqchip_tube
    });
    cfg.restore(snapshot).unwrap();
    irqchip_fake.join().unwrap();
    assert_eq!(cfg.pci_id, 0);
    assert_eq!(cfg.device_name, "test_device");
}
}