1use std::collections::BTreeMap;
6
7use acpi_tables::aml;
8use acpi_tables::aml::Aml;
9use anyhow::anyhow;
10use anyhow::Context;
11use base::error;
12use base::pagesize;
13use base::warn;
14use base::AsRawDescriptors;
15use base::Event;
16use base::RawDescriptor;
17use base::Result;
18use hypervisor::Datamatch;
19use resources::AllocOptions;
20use resources::SystemAllocator;
21use virtio_sys::virtio_config::VIRTIO_CONFIG_S_ACKNOWLEDGE;
22use virtio_sys::virtio_config::VIRTIO_CONFIG_S_DRIVER;
23use virtio_sys::virtio_config::VIRTIO_CONFIG_S_DRIVER_OK;
24use virtio_sys::virtio_config::VIRTIO_CONFIG_S_FAILED;
25use virtio_sys::virtio_config::VIRTIO_CONFIG_S_FEATURES_OK;
26use virtio_sys::virtio_config::VIRTIO_CONFIG_S_NEEDS_RESET;
27use virtio_sys::virtio_mmio::*;
28use vm_memory::GuestMemory;
29
30use super::*;
31use crate::pci::CrosvmDeviceId;
32use crate::BusAccessInfo;
33use crate::BusDevice;
34use crate::BusDeviceObj;
35use crate::DeviceId;
36use crate::IrqEdgeEvent;
37use crate::Suspendable;
38
/// Value of the MMIO "magic" register (offset 0): the ASCII string "virt"
/// when read as little-endian bytes.
const VIRT_MAGIC: u32 = 0x7472_6976;

/// Virtio MMIO interface version; 2 is the modern (non-legacy) layout, which
/// is why the legacy registers below only emit warnings.
const VIRT_VERSION: u8 = 2;

/// Vendor ID register value: the ASCII string "CRVM" as little-endian bytes.
const VIRT_VENDOR: u32 = 0x4D56_5243;

/// Size in bytes of the MMIO register region allocated per device.
const VIRTIO_MMIO_REGION_SZ: u64 = 0x200;
43
/// Virtio-mmio transport: exposes a `VirtioDevice` to the guest through a
/// memory-mapped register region plus an edge-triggered interrupt.
pub struct VirtioMmioDevice {
    // The wrapped virtio device implementation.
    device: Box<dyn VirtioDevice>,
    // True once `device.activate()` has succeeded; cleared on reset.
    device_activated: bool,

    // Interrupt object, created from `interrupt_evt` during activation.
    interrupt: Option<Interrupt>,
    // Edge IRQ event provided via `assign_irq`; cloned into `interrupt`.
    interrupt_evt: Option<IrqEdgeEvent>,
    // Forwarded to `Interrupt::new_mmio` at activation time.
    async_intr_status: bool,
    // One queue configuration per device queue (sized from queue_max_sizes()).
    queues: Vec<QueueConfig>,
    // One notify event per queue; registered as ioeventfds on QUEUE_NOTIFY.
    queue_evts: Vec<Event>,
    // Guest memory handed to the queues and device at activation.
    mem: GuestMemory,
    // Feature page selected by the driver via DEVICE_FEATURES_SEL.
    device_feature_select: u32,
    // Feature page selected by the driver via DRIVER_FEATURES_SEL.
    driver_feature_select: u32,
    // Queue index selected by the driver via QUEUE_SEL.
    queue_select: u16,
    // Last value the driver wrote to the STATUS register.
    driver_status: u8,
    // Guest-physical base of the register region (set by allocate_regions).
    mmio_base: u64,
    // IRQ line number (set by assign_irq, advertised via the Aml impl).
    irq_num: u32,
    // Value returned from the CONFIG_GENERATION register (never bumped here).
    config_generation: u32,
}
65
66impl VirtioMmioDevice {
67 pub fn new(
69 mem: GuestMemory,
70 device: Box<dyn VirtioDevice>,
71 async_intr_status: bool,
72 ) -> Result<Self> {
73 let mut queue_evts = Vec::new();
74 for _ in device.queue_max_sizes() {
75 queue_evts.push(Event::new()?)
76 }
77 let queues = device
78 .queue_max_sizes()
79 .iter()
80 .map(|&s| QueueConfig::new(s, device.features()))
81 .collect();
82
83 Ok(VirtioMmioDevice {
84 device,
85 device_activated: false,
86 interrupt: None,
87 interrupt_evt: None,
88 async_intr_status,
89 queues,
90 queue_evts,
91 mem,
92 device_feature_select: 0,
93 driver_feature_select: 0,
94 queue_select: 0,
95 driver_status: 0,
96 mmio_base: 0,
97 irq_num: 0,
98 config_generation: 0,
99 })
100 }
101 pub fn ioevents(&self) -> Vec<(&Event, u64, Datamatch)> {
102 self.queue_evts
103 .iter()
104 .enumerate()
105 .map(|(i, event)| {
106 (
107 event,
108 self.mmio_base + VIRTIO_MMIO_QUEUE_NOTIFY as u64,
109 Datamatch::U32(Some(i.try_into().unwrap())),
110 )
111 })
112 .collect()
113 }
114
115 fn is_driver_ready(&self) -> bool {
116 let ready_bits = (VIRTIO_CONFIG_S_ACKNOWLEDGE
117 | VIRTIO_CONFIG_S_DRIVER
118 | VIRTIO_CONFIG_S_DRIVER_OK
119 | VIRTIO_CONFIG_S_FEATURES_OK) as u8;
120 self.driver_status == ready_bits && self.driver_status & VIRTIO_CONFIG_S_FAILED as u8 == 0
121 }
122
123 fn is_reset_requested(&self) -> bool {
125 self.driver_status == DEVICE_RESET as u8
126 }
127
128 fn device_type(&self) -> u32 {
129 self.device.device_type() as u32
130 }
131
132 fn activate(&mut self) -> anyhow::Result<()> {
134 let interrupt_evt = if let Some(ref evt) = self.interrupt_evt {
135 evt.try_clone()
136 .with_context(|| format!("{} failed to clone interrupt_evt", self.debug_label()))?
137 } else {
138 return Err(anyhow!("{} interrupt_evt is none", self.debug_label()));
139 };
140
141 let mem = self.mem.clone();
142 let interrupt = Interrupt::new_mmio(interrupt_evt, self.async_intr_status);
143 self.interrupt = Some(interrupt.clone());
144
145 let queues = self
147 .queues
148 .iter_mut()
149 .zip(self.queue_evts.iter())
150 .enumerate()
151 .filter(|(_, (q, _))| q.ready())
152 .map(|(queue_index, (queue, evt))| {
153 let queue_evt = evt.try_clone().context("failed to clone queue_evt")?;
154 Ok((
155 queue_index,
156 queue
157 .activate(&mem, queue_evt, interrupt.clone())
158 .context("failed to activate queue")?,
159 ))
160 })
161 .collect::<anyhow::Result<BTreeMap<usize, Queue>>>()?;
162
163 if let Err(e) = self.device.activate(mem, interrupt, queues) {
164 error!("{} activate failed: {:#}", self.debug_label(), e);
165 self.driver_status |= VIRTIO_CONFIG_S_NEEDS_RESET as u8;
166 } else {
167 self.device_activated = true;
168 }
169
170 Ok(())
171 }
172
173 fn read_mmio(&self, info: BusAccessInfo, data: &mut [u8]) {
174 if data.len() != std::mem::size_of::<u32>() {
175 warn!(
176 "{}: unsupported read length {}, only support 4 bytes read",
177 self.debug_label(),
178 data.len()
179 );
180 return;
181 }
182
183 if info.offset >= VIRTIO_MMIO_CONFIG as u64 {
184 self.device
185 .read_config(info.offset - VIRTIO_MMIO_CONFIG as u64, data);
186 return;
187 }
188
189 let val = match info.offset as u32 {
190 VIRTIO_MMIO_MAGIC_VALUE => VIRT_MAGIC,
191 VIRTIO_MMIO_VERSION => VIRT_VERSION.into(), VIRTIO_MMIO_DEVICE_ID => self.device_type(),
193 VIRTIO_MMIO_VENDOR_ID => VIRT_VENDOR,
194 VIRTIO_MMIO_DEVICE_FEATURES => {
195 if self.device_feature_select < 2 {
196 (self.device.features() >> (self.device_feature_select * 32)) as u32
197 } else {
198 0
199 }
200 }
201 VIRTIO_MMIO_QUEUE_NUM_MAX => self.with_queue(|q| q.max_size()).unwrap_or(0).into(),
202 VIRTIO_MMIO_QUEUE_PFN => {
203 warn!(
204 "{}: read from legacy register {}, in non-legacy mode",
205 self.debug_label(),
206 info.offset,
207 );
208 0
209 }
210 VIRTIO_MMIO_QUEUE_READY => self.with_queue(|q| q.ready()).unwrap_or(false).into(),
211 VIRTIO_MMIO_INTERRUPT_STATUS => {
212 if let Some(interrupt) = &self.interrupt {
213 interrupt.read_interrupt_status().into()
214 } else {
215 0
216 }
217 }
218 VIRTIO_MMIO_STATUS => self.driver_status.into(),
219 VIRTIO_MMIO_CONFIG_GENERATION => self.config_generation,
220 _ => {
221 warn!("{}: unsupported read address {}", self.debug_label(), info);
222 return;
223 }
224 };
225
226 let val_arr = val.to_le_bytes();
227 data.copy_from_slice(&val_arr);
228 }
229
230 fn write_mmio(&mut self, info: BusAccessInfo, data: &[u8]) {
231 if data.len() != std::mem::size_of::<u32>() {
232 warn!(
233 "{}: unsupported write length {}, only support 4 bytes write",
234 self.debug_label(),
235 data.len()
236 );
237 return;
238 }
239
240 if info.offset >= VIRTIO_MMIO_CONFIG as u64 {
241 self.device
242 .write_config(info.offset - VIRTIO_MMIO_CONFIG as u64, data);
243 return;
244 }
245
246 let val = u32::from_le_bytes(data.try_into().unwrap());
248
249 macro_rules! hi {
250 ($q:expr, $get:ident, $set:ident, $x:expr) => {
251 $q.$set(($q.$get() & 0xffffffff) | (($x as u64) << 32))
252 };
253 }
254 macro_rules! lo {
255 ($q:expr, $get:ident, $set:ident, $x:expr) => {
256 $q.$set(($q.$get() & !0xffffffff) | ($x as u64))
257 };
258 }
259
260 match info.offset as u32 {
261 VIRTIO_MMIO_DEVICE_FEATURES_SEL => self.device_feature_select = val,
262 VIRTIO_MMIO_DRIVER_FEATURES_SEL => self.driver_feature_select = val,
263 VIRTIO_MMIO_DRIVER_FEATURES => {
264 if self.driver_feature_select < 2 {
265 let features: u64 = (val as u64) << (self.driver_feature_select * 32);
266 self.device.ack_features(features);
267 for queue in self.queues.iter_mut() {
268 queue.ack_features(features);
269 }
270 } else {
271 warn!(
272 "invalid ack_features (page {}, value 0x{:x})",
273 self.driver_feature_select, val
274 );
275 }
276 }
277 VIRTIO_MMIO_GUEST_PAGE_SIZE => warn!(
278 "{}: write to legacy register {}, in non-legacy mode",
279 self.debug_label(),
280 info.offset,
281 ),
282 VIRTIO_MMIO_QUEUE_SEL => self.queue_select = val as u16,
283 VIRTIO_MMIO_QUEUE_NUM => self.with_queue_mut(|q| q.set_size(val as u16)),
284 VIRTIO_MMIO_QUEUE_ALIGN => warn!(
285 "{}: write to legacy register {}, in non-legacy mode",
286 self.debug_label(),
287 info.offset,
288 ),
289 VIRTIO_MMIO_QUEUE_PFN => warn!(
290 "{}: write to legacy register {}, in non-legacy mode",
291 self.debug_label(),
292 info.offset,
293 ),
294 VIRTIO_MMIO_QUEUE_READY => self.with_queue_mut(|q| q.set_ready(val == 1)),
295 VIRTIO_MMIO_QUEUE_NOTIFY => {} VIRTIO_MMIO_INTERRUPT_ACK => {
297 if let Some(interrupt) = &self.interrupt {
298 interrupt.clear_interrupt_status_bits(val as u8)
299 }
300 }
301 VIRTIO_MMIO_STATUS => self.driver_status = val as u8,
302 VIRTIO_MMIO_QUEUE_DESC_LOW => {
303 self.with_queue_mut(|q| lo!(q, desc_table, set_desc_table, val))
304 }
305 VIRTIO_MMIO_QUEUE_DESC_HIGH => {
306 self.with_queue_mut(|q| hi!(q, desc_table, set_desc_table, val))
307 }
308 VIRTIO_MMIO_QUEUE_AVAIL_LOW => {
309 self.with_queue_mut(|q| lo!(q, avail_ring, set_avail_ring, val))
310 }
311 VIRTIO_MMIO_QUEUE_AVAIL_HIGH => {
312 self.with_queue_mut(|q| hi!(q, avail_ring, set_avail_ring, val))
313 }
314 VIRTIO_MMIO_QUEUE_USED_LOW => {
315 self.with_queue_mut(|q| lo!(q, used_ring, set_used_ring, val))
316 }
317 VIRTIO_MMIO_QUEUE_USED_HIGH => {
318 self.with_queue_mut(|q| hi!(q, used_ring, set_used_ring, val))
319 }
320 _ => {
321 warn!("{}: unsupported write address {}", self.debug_label(), info);
322 return;
323 }
324 };
325
326 if !self.device_activated && self.is_driver_ready() {
327 if let Err(e) = self.activate() {
328 error!("failed to activate device: {:#}", e);
329 }
330 }
331
332 if self.device_activated && self.is_reset_requested() {
334 if let Err(e) = self.device.reset() {
335 error!("failed to reset {} device: {:#}", self.debug_label(), e);
336 } else {
337 self.device_activated = false;
338 self.queues.iter_mut().for_each(QueueConfig::reset);
340 self.queue_select = 0;
342 self.interrupt = None;
344 }
345 }
346 }
347
348 fn with_queue<U, F>(&self, f: F) -> Option<U>
349 where
350 F: FnOnce(&QueueConfig) -> U,
351 {
352 self.queues.get(self.queue_select as usize).map(f)
353 }
354
355 fn with_queue_mut<F>(&mut self, f: F)
356 where
357 F: FnOnce(&mut QueueConfig),
358 {
359 if let Some(queue) = self.queues.get_mut(self.queue_select as usize) {
360 f(queue);
361 }
362 }
363
364 pub fn allocate_regions(
365 &mut self,
366 resources: &mut SystemAllocator,
367 ) -> std::result::Result<Vec<(u64, u64)>, resources::Error> {
368 let mut ranges = Vec::new();
369 let alloc_id = resources.get_anon_alloc();
370 let start_addr = resources.allocate_mmio(
371 VIRTIO_MMIO_REGION_SZ,
372 alloc_id,
373 "virtio_mmio".to_string(),
374 AllocOptions::new().align(pagesize() as u64),
375 )?;
376 self.mmio_base = start_addr;
377 ranges.push((start_addr, VIRTIO_MMIO_REGION_SZ));
378 Ok(ranges)
379 }
380
381 pub fn assign_irq(&mut self, irq_evt: &IrqEdgeEvent, irq_num: u32) {
382 self.interrupt_evt = Some(irq_evt.try_clone().unwrap());
383 self.irq_num = irq_num;
384 }
385
386 pub fn keep_rds(&self) -> Vec<RawDescriptor> {
387 let mut rds = self.device.keep_rds();
388 if let Some(interrupt_evt) = &self.interrupt_evt {
389 rds.extend(interrupt_evt.as_raw_descriptors());
390 }
391 rds
392 }
393
394 fn on_device_sandboxed(&mut self) {
395 self.device.on_device_sandboxed();
396 }
397}
398
399impl Aml for VirtioMmioDevice {
400 fn to_aml_bytes(&self, bytes: &mut Vec<u8>) {
401 aml::Device::new(
402 "VIOM".into(),
403 vec![
404 &aml::Name::new("_HID".into(), &"LNRO0005"),
405 &aml::Name::new(
406 "_CRS".into(),
407 &aml::ResourceTemplate::new(vec![
408 &aml::AddressSpace::new_memory(
409 aml::AddressSpaceCachable::NotCacheable,
410 true,
411 self.mmio_base,
412 self.mmio_base + VIRTIO_MMIO_REGION_SZ - 1,
413 ),
414 &aml::Interrupt::new(true, true, false, false, self.irq_num),
415 ]),
416 ),
417 ],
418 )
419 .to_aml_bytes(bytes);
420 }
421}
422
// Marker impl: relies entirely on BusDeviceObj's default methods.
impl BusDeviceObj for VirtioMmioDevice {}
424
425impl BusDevice for VirtioMmioDevice {
426 fn debug_label(&self) -> String {
427 format!("mmio{}", self.device.debug_label())
428 }
429
430 fn device_id(&self) -> DeviceId {
431 CrosvmDeviceId::VirtioMmio.into()
432 }
433
434 fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
435 self.read_mmio(info, data)
436 }
437
438 fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
439 self.write_mmio(info, data)
440 }
441
442 fn on_sandboxed(&mut self) {
443 self.on_device_sandboxed();
444 }
445}
446
// NOTE(review): relies on Suspendable's default method implementations —
// confirm snapshot/restore support is intentionally not provided here.
impl Suspendable for VirtioMmioDevice {}