use std::fs::File;
use std::sync::Arc;

use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::error;
use base::pagesize;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Event;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::Protection;
use base::RawDescriptor;
use hypervisor::MemCacheType;
use hypervisor::Vm;
use resources::SystemAllocator;
use vfio_sys::*;
use vm_control::api::VmMemoryClient;
use vm_control::VmMemoryDestination;
use vm_control::VmMemorySource;
use vm_memory::GuestAddress;

use crate::pci::CrosvmDeviceId;
use crate::vfio::VfioDevice;
use crate::vfio::VfioError;
use crate::vfio::VfioIrq;
use crate::BusAccessInfo;
use crate::BusDevice;
use crate::BusDeviceObj;
use crate::DeviceId;
use crate::IommuDevType;
use crate::IrqEdgeEvent;
use crate::IrqLevelEvent;
use crate::Suspendable;

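/// Guest MMIO range backed by a single VFIO device region.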
struct MmioInfo {
    index: usize,
    start: u64,
    length: u64,
}

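/// A platform (non-PCI) device passed through to the guest via VFIO.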
pub struct VfioPlatformDevice {
    device: Arc<VfioDevice>,
    interrupt_edge_evt: Vec<IrqEdgeEvent>,
    interrupt_level_evt: Vec<IrqLevelEvent>,
    mmio_regions: Vec<MmioInfo>,
    vm_memory_client: VmMemoryClient,
    mem: Vec<MemoryMapping>,
    activated: bool,
}

impl BusDevice for VfioPlatformDevice {
    fn device_id(&self) -> DeviceId {
        CrosvmDeviceId::VfioPlatformDevice.into()
    }

    fn debug_label(&self) -> String {
        format!("vfio {} device", self.device.device_name())
    }

    fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
        self.read_mmio(info.address, data)
    }

    fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
        self.write_mmio(info.address, data)
    }
}

impl Suspendable for VfioPlatformDevice {}

impl BusDeviceObj for VfioPlatformDevice {
    fn as_platform_device(&self) -> Option<&VfioPlatformDevice> {
        Some(self)
    }
    fn as_platform_device_mut(&mut self) -> Option<&mut VfioPlatformDevice> {
        Some(self)
    }
    fn into_platform_device(self: Box<Self>) -> Option<Box<VfioPlatformDevice>> {
        Some(self)
    }
}

impl VfioPlatformDevice {
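    /// Constructs a new `VfioPlatformDevice` wrapping the given VFIO device.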
    pub fn new(device: VfioDevice, vm_memory_client: VmMemoryClient) -> Self {
        let dev = Arc::new(device);
        VfioPlatformDevice {
            device: dev,
            interrupt_edge_evt: Vec::new(),
            interrupt_level_evt: Vec::new(),
            mmio_regions: Vec::new(),
            vm_memory_client,
            mem: Vec::new(),
            activated: false,
        }
    }

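    /// Returns the list of IRQs exposed by the underlying VFIO device.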
    pub fn get_platform_irqs(&self) -> Result<Vec<VfioIrq>, VfioError> {
        self.device.get_irqs()
    }

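    /// Returns true if the kernel auto-masks this IRQ when it fires
    /// (`VFIO_IRQ_INFO_AUTOMASKED`), so it must be unmasked to fire again.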
    pub fn irq_is_automask(&self, irq: &VfioIrq) -> bool {
        irq.flags & VFIO_IRQ_INFO_AUTOMASKED != 0
    }

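    // Installs `resample_evt` as the unmask (resample) event for the IRQ at
    // `index`, keeping the interrupt masked while the event is registered.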
    fn setup_irq_resample(&mut self, resample_evt: &Event, index: u32) -> Result<()> {
        self.device.irq_mask(index).context("Intx mask failed")?;
        self.device
            .resample_virq_enable(resample_evt, index)
            .context("resample enable failed")?;
        self.device
            .irq_unmask(index)
            .context("Intx unmask failed")?;
        Ok(())
    }

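    /// Routes the edge-triggered platform IRQ at `index` to `irq_evt`.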
    pub fn assign_edge_platform_irq(&mut self, irq_evt: &IrqEdgeEvent, index: u32) -> Result<()> {
        let interrupt_evt = irq_evt.try_clone().context("failed to clone irq event")?;
        self.device
            .irq_enable(&[Some(interrupt_evt.get_trigger())], index, 0)
            .context("platform irq enable failed")?;
        self.interrupt_edge_evt.push(interrupt_evt);
        Ok(())
    }

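    /// Routes the level-triggered platform IRQ at `index` to `irq_evt` and wires
    /// up its resample event so the IRQ is re-armed after the guest handles it.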
    pub fn assign_level_platform_irq(&mut self, irq_evt: &IrqLevelEvent, index: u32) -> Result<()> {
        let interrupt_evt = irq_evt.try_clone().context("failed to clone irq event")?;
        self.device
            .irq_enable(&[Some(interrupt_evt.get_trigger())], index, 0)
            .context("platform irq enable failed")?;
        if let Err(e) = self.setup_irq_resample(interrupt_evt.get_resample(), index) {
            self.disable_irqs(index);
            bail!("failed to set up irq resampling: {}", e);
        }
        self.interrupt_level_evt.push(interrupt_evt);
        Ok(())
    }

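    // Returns the MMIO region that contains `addr`, if any.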
    fn find_region(&self, addr: u64) -> Option<MmioInfo> {
        for mmio_info in self.mmio_regions.iter() {
            if addr >= mmio_info.start && addr < mmio_info.start + mmio_info.length {
                return Some(MmioInfo {
                    index: mmio_info.index,
                    start: mmio_info.start,
                    length: mmio_info.length,
                });
            }
        }
        None
    }

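    /// Allocates a page-aligned guest MMIO range for each region of the device,
    /// records it in `mmio_regions`, and returns the `(start, size)` pairs.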
    pub fn allocate_regions(
        &mut self,
        resources: &mut SystemAllocator,
    ) -> Result<Vec<(u64, u64)>, resources::Error> {
        let mut ranges = Vec::new();
        for i in 0..self.device.get_region_count() {
            let size = self.device.get_region_size(i);
            let alloc_id = resources.get_anon_alloc();
            let allocator = resources
                .mmio_platform_allocator()
                .ok_or(resources::Error::MissingPlatformMMIOAddresses)?;
            let start_addr = allocator.allocate_with_align(
                size,
                alloc_id,
                "vfio_mmio".to_string(),
                pagesize() as u64,
            )?;
            ranges.push((start_addr, size));

            self.mmio_regions.push(MmioInfo {
                index: i,
                start: start_addr,
                length: size,
            });
        }
        Ok(ranges)
    }

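    // Maps one mmap-capable region of the device directly into the guest address
    // space with `Vm::add_memory_region`, before the VM starts running.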
    fn region_mmap_early(&self, vm: &mut impl Vm, index: usize, start_addr: u64) {
        if self.device.get_region_flags(index) & VFIO_REGION_INFO_FLAG_MMAP == 0 {
            return;
        }

        for mmap in &self.device.get_region_mmap(index) {
            let mmap_offset = mmap.offset;
            let mmap_size = mmap.size;
            let guest_map_start = start_addr + mmap_offset;
            let region_offset = self.device.get_region_offset(index);
            let offset = region_offset + mmap_offset;

            let mmap = match MemoryMappingBuilder::new(mmap_size as usize)
                .from_file(self.device.device_file())
                .offset(offset)
                .build()
            {
                Ok(v) => v,
                Err(e) => {
                    error!("{e}, index: {index}, start_addr:{start_addr:#x}, offset:{offset:#x}");
                    break;
                }
            };

            let host = mmap.as_ptr();
            let guest_addr = GuestAddress(guest_map_start);
            if let Err(e) = vm.add_memory_region(
                guest_addr,
                Box::new(mmap),
                false,
                false,
                MemCacheType::CacheCoherent,
            ) {
                error!("{e}, index: {index}, guest_addr:{guest_addr}, host:{host:?}");
                break;
            }
        }
    }

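    /// Maps all mmap-capable regions into the guest before the VM starts running.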
    pub fn regions_mmap_early(&mut self, vm: &mut impl Vm) {
        for mmio_info in self.mmio_regions.iter() {
            self.region_mmap_early(vm, mmio_info.index, mmio_info.start);
        }
    }

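    // Registers one mmap-capable region with the VM through `VmMemoryClient` and
    // adds a matching VFIO DMA mapping at the guest physical address. Returns the
    // host mappings, which must stay alive while the mappings are in use.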
    fn region_mmap(&self, index: usize, start_addr: u64) -> Vec<MemoryMapping> {
        let mut mem_map: Vec<MemoryMapping> = Vec::new();
        if self.device.get_region_flags(index) & VFIO_REGION_INFO_FLAG_MMAP != 0 {
            let mmaps = self.device.get_region_mmap(index);
            if mmaps.is_empty() {
                return mem_map;
            }

            for mmap in mmaps.iter() {
                let mmap_offset = mmap.offset;
                let mmap_size = mmap.size;
                let guest_map_start = start_addr + mmap_offset;
                let region_offset = self.device.get_region_offset(index);
                let offset = region_offset + mmap_offset;
                let descriptor = match self.device.device_file().try_clone() {
                    Ok(device_file) => device_file.into(),
                    Err(_) => break,
                };
                match self.vm_memory_client.register_memory(
                    VmMemorySource::Descriptor {
                        descriptor,
                        offset,
                        size: mmap_size,
                    },
                    VmMemoryDestination::GuestPhysicalAddress(guest_map_start),
                    Protection::read_write(),
                    MemCacheType::CacheCoherent,
                ) {
                    Ok(_region) => {
                        let mmap = match MemoryMappingBuilder::new(mmap_size as usize)
                            .from_file(self.device.device_file())
                            .offset(offset)
                            .build()
                        {
                            Ok(v) => v,
                            Err(_e) => break,
                        };
                        let host = mmap.as_ptr() as u64;
                        match unsafe {
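                            // SAFETY: `guest_map_start` and `mmap_size` describe the
                            // region registered above, and `host` points into `mmap`,
                            // which stays alive because it is pushed onto `mem_map`.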
                            self.device
                                .vfio_dma_map(guest_map_start, mmap_size, host, true)
                        } {
                            Ok(_) => mem_map.push(mmap),
                            Err(e) => {
                                error!(
                                    "{}, index: {}, start_addr:0x{:x}, host:0x{:x}",
                                    e, index, start_addr, host
                                );
                                break;
                            }
                        }
                    }
                    Err(e) => {
                        error!("register_memory failed: {}", e);
                        break;
                    }
                }
            }
        }

        mem_map
    }

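    // Registers and DMA-maps every MMIO region, stashing the resulting host
    // mappings in `self.mem` so they stay alive.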
    fn regions_mmap(&mut self) {
        for mmio_info in self.mmio_regions.iter() {
            let mut mem_map = self.region_mmap(mmio_info.index, mmio_info.start);
            self.mem.append(&mut mem_map);
        }
    }

    fn disable_irqs(&mut self, index: u32) {
        if let Err(e) = self.device.irq_disable(index) {
            error!("Platform irq disable failed: {}", e);
        }
    }

    fn read_mmio(&mut self, addr: u64, data: &mut [u8]) {
        if let Some(mmio_info) = self.find_region(addr) {
            let offset = addr - mmio_info.start;
            let index = mmio_info.index;
            self.device.region_read(index, data, offset);
        }

        // Set up the region mmaps lazily, on the first MMIO access.
        if !self.activated {
            self.regions_mmap();
            self.activated = true;
        }
    }

    fn write_mmio(&mut self, addr: u64, data: &[u8]) {
        if let Some(mmio_info) = self.find_region(addr) {
            let offset = addr - mmio_info.start;
            let index = mmio_info.index;
            self.device.region_write(index, data, offset);
        }

        // Set up the region mmaps lazily, on the first MMIO access.
        if !self.activated {
            self.regions_mmap();
            self.activated = true;
        }
    }

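    /// Returns the raw descriptors used by this device that must be kept open.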
    pub fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut rds = self.device.keep_rds();

        for irq_evt in self.interrupt_edge_evt.iter() {
            rds.extend(irq_evt.as_raw_descriptors());
        }

        for irq_evt in self.interrupt_level_evt.iter() {
            rds.extend(irq_evt.as_raw_descriptors());
        }

        rds.push(self.vm_memory_client.as_raw_descriptor());
        rds
    }

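    /// Returns a reference to the underlying VFIO device file.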
    pub fn device_file(&self) -> &File {
        self.device.device_file()
    }

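    /// Returns the device tree symbol (label) for this device, if one is set.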
    pub fn dt_symbol(&self) -> Option<&str> {
        self.device.dt_symbol()
    }

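    /// Returns the IOMMU information for this device, if it sits behind an IOMMU.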
    pub fn iommu(&self) -> Option<(IommuDevType, Option<u32>, &[u32])> {
        self.device.iommu()
    }
}