1use std::fmt;
6use std::sync::atomic::AtomicUsize;
7use std::sync::atomic::Ordering;
8use std::sync::Arc;
9#[cfg(target_arch = "x86_64")]
10use std::time::Instant;
11
12#[cfg(target_arch = "x86_64")]
13use base::error;
14use base::Event;
15use base::EventToken;
16use base::WaitContext;
17use base::WorkerThread;
18#[cfg(target_arch = "x86_64")]
19use metrics::log_metric;
20#[cfg(target_arch = "x86_64")]
21use metrics::MetricEventType;
22use serde::Deserialize;
23use serde::Serialize;
24use sync::Mutex;
25
26use super::INTERRUPT_STATUS_CONFIG_CHANGED;
27use super::INTERRUPT_STATUS_USED_RING;
28use super::VIRTIO_MSI_NO_VECTOR;
29#[cfg(target_arch = "x86_64")]
30use crate::acpi::PmWakeupEvent;
31use crate::irq_event::IrqEdgeEvent;
32use crate::irq_event::IrqLevelEvent;
33use crate::pci::MsixConfig;
34
/// Interrupt delivery state specific to the virtio-over-PCI transport.
struct TransportPci {
    // Level-triggered INTx interrupt event pair (trigger + resample).
    irq_evt_lvl: IrqLevelEvent,
    // MSI-X configuration, present only if the device exposes MSI-X.
    msix_config: Option<Arc<Mutex<MsixConfig>>>,
    // MSI-X vector used for configuration-change notifications.
    config_msix_vector: u16,
}
40
/// The transport-specific mechanism used to deliver virtio interrupts to the
/// guest.
enum Transport {
    /// Virtio-over-PCI: INTx level interrupt plus optional MSI-X.
    Pci {
        pci: TransportPci,
    },
    /// Virtio-over-MMIO: a single edge-triggered interrupt line.
    Mmio {
        irq_evt_edge: IrqEdgeEvent,
    },
    /// Vhost-user backend: notifications go to the frontend process.
    VhostUser {
        // Signalled for every interrupt; vector and status mask are not
        // conveyed through this event (see `Interrupt::signal`).
        call_evt: Event,
        // Invoked instead of a status-bit signal on config changes.
        signal_config_changed_fn: Box<dyn Fn() + Send + Sync>,
    },
}
53
/// State shared by all clones of an [`Interrupt`].
struct InterruptInner {
    // Pending interrupt cause bits (used-ring / config-changed); read and
    // cleared via the `read_*`/`clear_*` methods on `Interrupt`.
    interrupt_status: AtomicUsize,
    // How interrupts reach the guest for this device.
    transport: Transport,
    // When true, `update_interrupt_status` reports "signal needed" even if
    // status bits were already pending (set only by `Interrupt::new_mmio`).
    async_intr_status: bool,
    // Suspend/wakeup bookkeeping; queues signals while suspended.
    pm_state: Mutex<PmState>,
}
60
61impl InterruptInner {
62 fn update_interrupt_status(&self, interrupt_status_mask: u32) -> bool {
66 self.interrupt_status
71 .fetch_or(interrupt_status_mask as usize, Ordering::SeqCst)
72 == 0
73 || self.async_intr_status
74 }
75}
76
/// Cheaply cloneable handle used by a virtio device to signal the guest.
/// All clones share the same underlying [`InterruptInner`] state.
#[derive(Clone)]
pub struct Interrupt {
    inner: Arc<InterruptInner>,
}
81
82impl fmt::Debug for Interrupt {
83 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
84 write!(f, "Interrupt")
85 }
86}
87
/// Serializable portion of an [`Interrupt`]'s state, captured by
/// `Interrupt::snapshot` and restored via `Interrupt::new_from_snapshot`.
#[derive(Serialize, Deserialize)]
pub struct InterruptSnapshot {
    // Pending interrupt status bits at snapshot time.
    interrupt_status: usize,
}
92
impl Interrupt {
    /// Delivers an interrupt to the guest: via MSI-X `vector` when enabled
    /// (PCI), otherwise by setting `interrupt_status_mask` bits and
    /// triggering the transport's interrupt event. While the device is
    /// suspended, the signal is queued instead and replayed on resume.
    pub fn signal(&self, vector: u16, interrupt_status_mask: u32) {
        // handle_interrupt() returns true while suspended, after recording
        // (vector, mask) so set_suspended(false) can replay it later.
        if self
            .inner
            .pm_state
            .lock()
            .handle_interrupt(vector, interrupt_status_mask)
        {
            return;
        }

        match &self.inner.transport {
            Transport::Pci { pci } => {
                if let Some(msix_config) = &pci.msix_config {
                    let mut msix_config = msix_config.lock();
                    if msix_config.enabled() {
                        // MSI-X path: trigger the vector directly;
                        // VIRTIO_MSI_NO_VECTOR means no vector is assigned.
                        if vector != VIRTIO_MSI_NO_VECTOR {
                            msix_config.trigger(vector);
                        }
                        return;
                    }
                }

                // INTx fallback: assert the level interrupt only on a
                // zero -> non-zero status transition (or when
                // async_intr_status forces it) to avoid redundant triggers.
                if self.inner.update_interrupt_status(interrupt_status_mask) {
                    pci.irq_evt_lvl.trigger().unwrap();
                }
            }
            Transport::Mmio { irq_evt_edge } => {
                if self.inner.update_interrupt_status(interrupt_status_mask) {
                    irq_evt_edge.trigger().unwrap();
                }
            }
            Transport::VhostUser { call_evt, .. } => {
                // vector and interrupt_status_mask are not forwarded; the
                // event signal alone carries the notification.
                call_evt.signal().unwrap();
            }
        }
    }

    /// Notifies the guest that a queue's used ring was updated.
    pub fn signal_used_queue(&self, vector: u16) {
        self.signal(vector, INTERRUPT_STATUS_USED_RING)
    }

    /// Notifies the guest that the device configuration changed.
    pub fn signal_config_changed(&self) {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => {
                self.signal(pci.config_msix_vector, INTERRUPT_STATUS_CONFIG_CHANGED)
            }
            Transport::Mmio { .. } => {
                self.signal(VIRTIO_MSI_NO_VECTOR, INTERRUPT_STATUS_CONFIG_CHANGED)
            }
            Transport::VhostUser {
                signal_config_changed_fn,
                ..
            } => signal_config_changed_fn(),
        }
    }

    /// Returns the resample event, if any. Only the PCI level-triggered
    /// interrupt has one; other transports return `None`.
    fn get_resample_evt(&self) -> Option<&Event> {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => Some(pci.irq_evt_lvl.get_resample()),
            _ => None,
        }
    }

    /// Spawns a worker thread that re-asserts the interrupt when the
    /// resample event fires (see `interrupt_resample_thread`). Returns
    /// `None` for transports without a resample event.
    pub fn spawn_resample_thread(&self) -> Option<WorkerThread<()>> {
        if self.get_resample_evt().is_some() {
            let interrupt = self.clone();
            Some(WorkerThread::start("crosvm_resample", move |kill_evt| {
                interrupt_resample_thread(kill_evt, interrupt)
            }))
        } else {
            None
        }
    }
}
180
181impl Interrupt {
182 pub fn new(
183 irq_evt_lvl: IrqLevelEvent,
184 msix_config: Option<Arc<Mutex<MsixConfig>>>,
185 config_msix_vector: u16,
186 #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
187 ) -> Interrupt {
188 Interrupt {
189 inner: Arc::new(InterruptInner {
190 interrupt_status: AtomicUsize::new(0),
191 async_intr_status: false,
192 transport: Transport::Pci {
193 pci: TransportPci {
194 irq_evt_lvl,
195 msix_config,
196 config_msix_vector,
197 },
198 },
199 pm_state: PmState::new(
200 #[cfg(target_arch = "x86_64")]
201 wakeup_event,
202 ),
203 }),
204 }
205 }
206
207 pub fn new_from_snapshot(
211 irq_evt_lvl: IrqLevelEvent,
212 msix_config: Option<Arc<Mutex<MsixConfig>>>,
213 config_msix_vector: u16,
214 snapshot: InterruptSnapshot,
215 #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
216 ) -> Interrupt {
217 Interrupt {
218 inner: Arc::new(InterruptInner {
219 interrupt_status: AtomicUsize::new(snapshot.interrupt_status),
220 async_intr_status: false,
221 transport: Transport::Pci {
222 pci: TransportPci {
223 irq_evt_lvl,
224 msix_config,
225 config_msix_vector,
226 },
227 },
228 pm_state: PmState::new(
229 #[cfg(target_arch = "x86_64")]
230 wakeup_event,
231 ),
232 }),
233 }
234 }
235
236 pub fn new_mmio(irq_evt_edge: IrqEdgeEvent, async_intr_status: bool) -> Interrupt {
237 Interrupt {
238 inner: Arc::new(InterruptInner {
239 interrupt_status: AtomicUsize::new(0),
240 transport: Transport::Mmio { irq_evt_edge },
241 async_intr_status,
242 pm_state: PmState::new(
243 #[cfg(target_arch = "x86_64")]
244 None,
245 ),
246 }),
247 }
248 }
249
250 pub fn new_vhost_user(
253 call_evt: Event,
254 signal_config_changed_fn: Box<dyn Fn() + Send + Sync>,
255 ) -> Interrupt {
256 Interrupt {
257 inner: Arc::new(InterruptInner {
258 interrupt_status: AtomicUsize::new(0),
259 transport: Transport::VhostUser {
260 call_evt,
261 signal_config_changed_fn,
262 },
263 async_intr_status: false,
264 pm_state: PmState::new(
265 #[cfg(target_arch = "x86_64")]
266 None,
267 ),
268 }),
269 }
270 }
271
272 #[cfg(test)]
273 pub fn new_for_test() -> Interrupt {
274 Interrupt::new(
275 IrqLevelEvent::new().unwrap(),
276 None,
277 VIRTIO_MSI_NO_VECTOR,
278 #[cfg(target_arch = "x86_64")]
279 None,
280 )
281 }
282
283 #[cfg(test)]
284 pub fn new_for_test_with_msix() -> Interrupt {
285 let (_, unused_config_tube) = base::Tube::pair().unwrap();
286 let msix_vectors = 2;
287 let msix_cfg = MsixConfig::new(
288 msix_vectors,
289 unused_config_tube,
290 0,
291 "test_device".to_owned(),
292 );
293
294 Interrupt::new(
295 IrqLevelEvent::new().unwrap(),
296 Some(Arc::new(Mutex::new(msix_cfg))),
297 msix_vectors,
298 #[cfg(target_arch = "x86_64")]
299 None,
300 )
301 }
302
303 pub fn get_interrupt_evt(&self) -> &Event {
305 match &self.inner.as_ref().transport {
306 Transport::Pci { pci } => pci.irq_evt_lvl.get_trigger(),
307 Transport::Mmio { irq_evt_edge } => irq_evt_edge.get_trigger(),
308 Transport::VhostUser { call_evt, .. } => call_evt,
309 }
310 }
311
312 pub fn get_msix_config(&self) -> &Option<Arc<Mutex<MsixConfig>>> {
314 match &self.inner.as_ref().transport {
315 Transport::Pci { pci } => &pci.msix_config,
316 _ => &None,
317 }
318 }
319
320 pub fn read_interrupt_status(&self) -> u8 {
322 self.inner.interrupt_status.load(Ordering::SeqCst) as u8
323 }
324
325 pub fn read_and_reset_interrupt_status(&self) -> u8 {
327 self.inner.interrupt_status.swap(0, Ordering::SeqCst) as u8
328 }
329
330 pub fn clear_interrupt_status_bits(&self, mask: u8) {
332 self.inner
333 .interrupt_status
334 .fetch_and(!(mask as usize), Ordering::SeqCst);
335 }
336
337 pub fn snapshot(&self) -> InterruptSnapshot {
339 InterruptSnapshot {
340 interrupt_status: self.inner.interrupt_status.load(Ordering::SeqCst),
341 }
342 }
343
344 pub fn set_suspended(&self, suspended: bool) {
345 let retrigger_evts = self.inner.pm_state.lock().set_suspended(suspended);
346 for (vector, interrupt_status_mask) in retrigger_evts.into_iter() {
347 self.signal(vector, interrupt_status_mask);
348 }
349 }
350
351 #[cfg(target_arch = "x86_64")]
352 pub fn set_wakeup_event_active(&self, active: bool) {
353 self.inner.pm_state.lock().set_wakeup_event_active(active);
354 }
355}
356
/// Power-management wakeup bookkeeping for a device whose interrupts can
/// wake a suspended VM (x86_64 only).
#[cfg(target_arch = "x86_64")]
struct WakeupState {
    // Event used to request a wakeup when an interrupt arrives while
    // suspended.
    wakeup_event: PmWakeupEvent,
    // Whether wakeups are currently armed (see set_wakeup_event_active).
    wakeup_enabled: bool,
    // When the wakeup was last armed; used to compute the logged metric.
    armed_time: Instant,
    // Metric event logged each time a wakeup is triggered.
    metrics_event: MetricEventType,
    // Present while a wakeup is outstanding; signalled to clear it when the
    // wakeup event is disarmed.
    wakeup_clear_evt: Option<Event>,
}
365
366#[cfg(target_arch = "x86_64")]
367impl WakeupState {
368 fn new(wakeup_event: Option<(PmWakeupEvent, MetricEventType)>) -> Option<Self> {
369 wakeup_event.map(|(wakeup_event, metrics_event)| Self {
370 wakeup_event,
371 wakeup_enabled: false,
372 armed_time: Instant::now(),
374 metrics_event,
375 wakeup_clear_evt: None,
376 })
377 }
378
379 fn trigger_wakeup(&mut self) {
380 if self.wakeup_clear_evt.is_some() {
381 return;
382 }
383
384 let elapsed = self.armed_time.elapsed().as_millis();
385 log_metric(
386 self.metrics_event.clone(),
387 elapsed.try_into().unwrap_or(i64::MAX),
388 );
389
390 match self.wakeup_event.trigger_wakeup() {
391 Ok(clear_evt) => self.wakeup_clear_evt = clear_evt,
392 Err(err) => error!("Wakeup trigger failed {:?}", err),
393 }
394 }
395}
396
/// Suspend-state tracking so interrupts raised while the device is
/// suspended can be deferred and replayed on resume.
struct PmState {
    // True while suspended; `handle_interrupt` queues instead of delivering.
    suspended: bool,
    // (vector, interrupt_status_mask) pairs queued while suspended.
    pending_signals: Vec<(u16, u32)>,
    // Optional wakeup plumbing (x86_64 only).
    #[cfg(target_arch = "x86_64")]
    wakeup_state: Option<WakeupState>,
}
409
410impl PmState {
411 fn new(
412 #[cfg(target_arch = "x86_64")] wakeup_event: Option<(PmWakeupEvent, MetricEventType)>,
413 ) -> Mutex<Self> {
414 Mutex::new(Self {
415 suspended: false,
416 pending_signals: Vec::new(),
417 #[cfg(target_arch = "x86_64")]
418 wakeup_state: WakeupState::new(wakeup_event),
419 })
420 }
421
422 fn handle_interrupt(&mut self, vector: u16, mask: u32) -> bool {
423 if self.suspended {
424 self.pending_signals.push((vector, mask));
425 #[cfg(target_arch = "x86_64")]
426 if let Some(wakeup_state) = self.wakeup_state.as_mut() {
427 if wakeup_state.wakeup_enabled {
428 wakeup_state.trigger_wakeup();
429 }
430 }
431 }
432 self.suspended
433 }
434
435 fn set_suspended(&mut self, suspended: bool) -> Vec<(u16, u32)> {
436 self.suspended = suspended;
437 std::mem::take(&mut self.pending_signals)
438 }
439
440 #[cfg(target_arch = "x86_64")]
441 fn set_wakeup_event_active(&mut self, active: bool) {
442 let Some(wakeup_state) = self.wakeup_state.as_mut() else {
443 return;
444 };
445
446 wakeup_state.wakeup_enabled = active;
447 if active {
448 wakeup_state.armed_time = Instant::now();
449 if !self.pending_signals.is_empty() {
450 wakeup_state.trigger_wakeup();
451 }
452 } else if let Some(clear_evt) = wakeup_state.wakeup_clear_evt.take() {
453 if let Err(e) = clear_evt.signal() {
454 error!("failed to signal clear event {}", e);
455 }
456 }
457 }
458}
459
/// Worker loop that re-asserts a level-triggered interrupt after a resample:
/// whenever the resample event fires and interrupt status bits are still
/// pending, the interrupt event is signalled again. Exits when `kill_evt`
/// is signalled.
fn interrupt_resample_thread(kill_evt: Event, interrupt: Interrupt) {
    #[derive(EventToken)]
    enum Token {
        Resample,
        Kill,
    }

    let interrupt_status = &interrupt.inner.interrupt_status;
    let interrupt_evt = interrupt.get_interrupt_evt();
    // Only PCI transports have a resample event; spawn_resample_thread
    // checks for one before starting this thread, so this cannot fail there.
    let resample_evt = interrupt
        .get_resample_evt()
        .expect("must have resample evt in interrupt_resample_thread");

    let wait_ctx =
        WaitContext::build_with(&[(resample_evt, Token::Resample), (&kill_evt, Token::Kill)])
            .expect("failed to create WaitContext");

    loop {
        let events = wait_ctx.wait().expect("WaitContext::wait() failed");
        for event in events {
            match event.token {
                Token::Resample => {
                    // Consume the resample notification; the result is
                    // ignored since the wait context already reported it.
                    let _ = resample_evt.wait();
                    // Re-raise the interrupt if any status bit is still set.
                    if interrupt_status.load(Ordering::SeqCst) != 0 {
                        interrupt_evt.signal().unwrap();
                    }
                }
                Token::Kill => return,
            }
        }
    }
}