1mod edid;
6mod parameters;
7mod protocol;
8mod snapshot;
9mod virtio_gpu;
10
11use std::cell::RefCell;
12use std::collections::BTreeMap;
13use std::io::Read;
14use std::path::PathBuf;
15use std::rc::Rc;
16use std::sync::atomic::AtomicBool;
17use std::sync::atomic::Ordering;
18use std::sync::mpsc;
19use std::sync::Arc;
20
21use ::snapshot::AnySnapshot;
22use anyhow::anyhow;
23use anyhow::Context;
24use base::custom_serde::deserialize_map_from_kv_vec;
25use base::custom_serde::serialize_map_as_kv_vec;
26use base::debug;
27use base::error;
28use base::info;
29#[cfg(any(target_os = "android", target_os = "linux"))]
30use base::linux::move_task_to_cgroup;
31use base::warn;
32use base::AsRawDescriptor;
33use base::Event;
34use base::EventToken;
35use base::RawDescriptor;
36use base::ReadNotifier;
37#[cfg(windows)]
38use base::RecvTube;
39use base::Result;
40use base::SafeDescriptor;
41use base::SendTube;
42use base::Tube;
43use base::VmEventType;
44use base::WaitContext;
45use base::WorkerThread;
46use data_model::*;
47pub use gpu_display::EventDevice;
48use gpu_display::*;
49use hypervisor::MemCacheType;
50pub use parameters::AudioDeviceMode;
51pub use parameters::GpuParameters;
52use rutabaga_gfx::*;
53use serde::Deserialize;
54use serde::Serialize;
55use sync::Mutex;
56pub use vm_control::gpu::DisplayMode as GpuDisplayMode;
57pub use vm_control::gpu::DisplayParameters as GpuDisplayParameters;
58use vm_control::gpu::GpuControlCommand;
59use vm_control::gpu::GpuControlResult;
60pub use vm_control::gpu::MouseMode as GpuMouseMode;
61pub use vm_control::gpu::DEFAULT_DISPLAY_HEIGHT;
62pub use vm_control::gpu::DEFAULT_DISPLAY_WIDTH;
63pub use vm_control::gpu::DEFAULT_REFRESH_RATE;
64#[cfg(windows)]
65use vm_control::ModifyWaitContext;
66use vm_memory::GuestAddress;
67use vm_memory::GuestMemory;
68use zerocopy::IntoBytes;
69
70pub use self::protocol::virtio_gpu_config;
71pub use self::protocol::VIRTIO_GPU_F_CONTEXT_INIT;
72pub use self::protocol::VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
73pub use self::protocol::VIRTIO_GPU_F_EDID;
74pub use self::protocol::VIRTIO_GPU_F_FENCE_PASSING;
75pub use self::protocol::VIRTIO_GPU_F_RESOURCE_BLOB;
76pub use self::protocol::VIRTIO_GPU_F_RESOURCE_UUID;
77pub use self::protocol::VIRTIO_GPU_F_VIRGL;
78pub use self::protocol::VIRTIO_GPU_MAX_SCANOUTS;
79pub use self::protocol::VIRTIO_GPU_SHM_ID_HOST_VISIBLE;
80use self::protocol::*;
81use self::virtio_gpu::to_rutabaga_descriptor;
82pub use self::virtio_gpu::ProcessDisplayResult;
83use self::virtio_gpu::VirtioGpu;
84use self::virtio_gpu::VirtioGpuSnapshot;
85use super::copy_config;
86use super::resource_bridge::ResourceRequest;
87use super::DescriptorChain;
88use super::DeviceType;
89use super::Interrupt;
90use super::Queue;
91use super::Reader;
92use super::SharedMemoryMapper;
93use super::SharedMemoryPrepareType;
94use super::SharedMemoryRegion;
95use super::VirtioDevice;
96use super::Writer;
97use crate::PciAddress;
98
/// Virtqueue sizes: index 0 is the control queue, index 1 the cursor queue.
const QUEUE_SIZES: &[u16] = &[512, 16];
102
/// Selects which rendering backend the virtio-gpu device uses.
#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum GpuMode {
    /// 2D-only output with no hardware-accelerated rendering (the default).
    #[default]
    #[serde(rename = "2d", alias = "2D")]
    Mode2D,
    /// 3D acceleration through virglrenderer.
    #[serde(rename = "virglrenderer", alias = "3d", alias = "3D")]
    ModeVirglRenderer,
    /// 3D acceleration through the gfxstream renderer.
    #[serde(rename = "gfxstream")]
    ModeGfxstream,
}
113
/// Window system integration backend used by the renderer.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum GpuWsi {
    /// Present via a Vulkan swapchain.
    #[serde(alias = "vk")]
    Vulkan,
}
120
/// Layout metadata for a blob resource used as a scanout
/// (from the SET_SCANOUT_BLOB command).
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
pub struct VirtioScanoutBlobData {
    pub width: u32,
    pub height: u32,
    /// DRM fourcc format code.
    pub drm_format: u32,
    /// Per-plane row strides in bytes; up to 4 planes.
    pub strides: [u32; 4],
    /// Per-plane starting offsets in bytes; up to 4 planes.
    pub offsets: [u32; 4],
}
129
/// Identifies the timeline a fence belongs to: the device-global ring or a
/// per-context ring selected by `ring_idx`.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
enum VirtioGpuRing {
    Global,
    ContextSpecific { ctx_id: u32, ring_idx: u8 },
}
135
/// A control-queue descriptor chain held back until its fence completes.
struct FenceDescriptor {
    /// Ring whose completion retires this descriptor.
    ring: VirtioGpuRing,
    /// Fence id this descriptor waits on.
    fence_id: u64,
    desc_chain: DescriptorChain,
    /// Number of bytes written to the descriptor's response buffer.
    len: u32,
}
142
/// Shared fence bookkeeping between the command processor and the fence
/// completion callback.
#[derive(Default)]
pub struct FenceState {
    // Descriptors parked until their fence signals.
    descs: Vec<FenceDescriptor>,
    // Latest completed fence id observed on each ring.
    completed_fences: BTreeMap<VirtioGpuRing, u64>,
}
148
/// Serializable subset of `FenceState` (pending descriptors are required to
/// be empty at snapshot time, so only the completed-fence map is saved).
#[derive(Serialize, Deserialize)]
struct FenceStateSnapshot {
    // Stored as a list of key/value pairs because the map key is an enum,
    // which some serde formats can't use directly as a map key.
    #[serde(
        serialize_with = "serialize_map_as_kv_vec",
        deserialize_with = "deserialize_map_from_kv_vec"
    )]
    completed_fences: BTreeMap<VirtioGpuRing, u64>,
}
159
160impl FenceState {
161 fn snapshot(&self) -> FenceStateSnapshot {
162 assert!(self.descs.is_empty(), "can't snapshot with pending fences");
163 FenceStateSnapshot {
164 completed_fences: self.completed_fences.clone(),
165 }
166 }
167
168 fn restore(&mut self, snapshot: FenceStateSnapshot) {
169 assert!(self.descs.is_empty(), "can't restore activated device");
170 self.completed_fences = snapshot.completed_fences;
171 }
172}
173
/// Minimal interface over a virtqueue used by the GPU command processor,
/// abstracting over single-threaded (`RefCell`) and shared (`Mutex`) queues.
pub trait QueueReader {
    /// Pops the next available descriptor chain, if any.
    fn pop(&self) -> Option<DescriptorChain>;
    /// Returns a finished descriptor chain to the guest with `len` bytes written.
    fn add_used(&self, desc_chain: DescriptorChain, len: u32);
    /// Triggers the queue's interrupt to notify the guest of used buffers.
    fn signal_used(&self);
}
179
/// Single-threaded queue wrapper (used for the cursor queue).
struct LocalQueueReader {
    queue: RefCell<Queue>,
}
183
184impl LocalQueueReader {
185 fn new(queue: Queue) -> Self {
186 Self {
187 queue: RefCell::new(queue),
188 }
189 }
190}
191
192impl QueueReader for LocalQueueReader {
193 fn pop(&self) -> Option<DescriptorChain> {
194 self.queue.borrow_mut().pop()
195 }
196
197 fn add_used(&self, desc_chain: DescriptorChain, len: u32) {
198 self.queue
199 .borrow_mut()
200 .add_used_with_bytes_written(desc_chain, len);
201 }
202
203 fn signal_used(&self) {
204 self.queue.borrow_mut().trigger_interrupt();
205 }
206}
207
/// Cloneable, thread-safe queue wrapper (used for the control queue, which
/// is also completed from the fence handler callback).
#[derive(Clone)]
struct SharedQueueReader {
    queue: Arc<Mutex<Queue>>,
}
212
213impl SharedQueueReader {
214 fn new(queue: Queue) -> Self {
215 Self {
216 queue: Arc::new(Mutex::new(queue)),
217 }
218 }
219}
220
221impl QueueReader for SharedQueueReader {
222 fn pop(&self) -> Option<DescriptorChain> {
223 self.queue.lock().pop()
224 }
225
226 fn add_used(&self, desc_chain: DescriptorChain, len: u32) {
227 self.queue
228 .lock()
229 .add_used_with_bytes_written(desc_chain, len);
230 }
231
232 fn signal_used(&self) {
233 self.queue.lock().trigger_interrupt();
234 }
235}
236
/// Builds a `VirtioGpu` by trying each display backend in order and using
/// the first one that opens successfully.
///
/// Returns `None` if no backend could be opened or if `VirtioGpu`
/// construction itself fails.
fn build(
    display_backends: &[DisplayBackend],
    display_params: Vec<GpuDisplayParameters>,
    display_event: Arc<AtomicBool>,
    rutabaga: Rutabaga,
    mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
    external_blob: bool,
    fixed_blob_mapping: bool,
    #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>,
    udmabuf: bool,
    #[cfg(windows)] gpu_display_wait_descriptor_ctrl_wr: SendTube,
    snapshot_scratch_directory: Option<PathBuf>,
) -> Option<VirtioGpu> {
    // Try backends in the order given; the first that opens wins.
    let mut display_opt = None;
    for display_backend in display_backends {
        match display_backend.build(
            #[cfg(windows)]
            wndproc_thread,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_wr
                .try_clone()
                .expect("failed to clone wait context ctrl channel"),
        ) {
            Ok(c) => {
                display_opt = Some(c);
                break;
            }
            // Log the failure and fall through to the next candidate.
            Err(e) => error!("failed to open display: {}", e),
        };
    }

    let display = match display_opt {
        Some(d) => d,
        None => {
            error!("failed to open any displays");
            return None;
        }
    };

    VirtioGpu::new(
        display,
        display_params,
        display_event,
        rutabaga,
        mapper,
        external_blob,
        fixed_blob_mapping,
        udmabuf,
        snapshot_scratch_directory,
    )
}
289
/// Resources the fence-completion callback needs once the device is
/// activated: guest memory and the control queue to return descriptors on.
pub struct FenceHandlerActivationResources<Q>
where
    Q: QueueReader + Send + Clone + 'static,
{
    pub mem: GuestMemory,
    pub ctrl_queue: Q,
}
298
/// Creates the callback rutabaga invokes when a fence completes.
///
/// On each completion the callback retires every parked descriptor on the
/// fence's ring whose fence id is <= the completed id, returns those
/// descriptors on the control queue, and records the newest completed fence
/// id for the ring. It does nothing until activation populates
/// `fence_handler_resources`.
pub fn create_fence_handler<Q>(
    fence_handler_resources: Arc<Mutex<Option<FenceHandlerActivationResources<Q>>>>,
    fence_state: Arc<Mutex<FenceState>>,
) -> RutabagaFenceHandler
where
    Q: QueueReader + Send + Clone + 'static,
{
    RutabagaFenceHandler::new(move |completed_fence: RutabagaFence| {
        let mut signal = false;

        if let Some(ref fence_handler_resources) = *fence_handler_resources.lock() {
            {
                // A fence belongs either to the global ring or, when the
                // ring-index flag is set, to a per-context ring.
                let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
                    0 => VirtioGpuRing::Global,
                    _ => VirtioGpuRing::ContextSpecific {
                        ctx_id: completed_fence.ctx_id,
                        ring_idx: completed_fence.ring_idx,
                    },
                };

                let mut fence_state = fence_state.lock();
                // Retire all parked descriptors on this ring with a fence id
                // at or below the completed one (earlier fences on the same
                // ring are treated as complete as well).
                let mut i = 0;
                while i < fence_state.descs.len() {
                    if fence_state.descs[i].ring == ring
                        && fence_state.descs[i].fence_id <= completed_fence.fence_id
                    {
                        let completed_desc = fence_state.descs.remove(i);
                        fence_handler_resources
                            .ctrl_queue
                            .add_used(completed_desc.desc_chain, completed_desc.len);
                        signal = true;
                    } else {
                        i += 1;
                    }
                }

                // Remember the newest completed fence so later submissions
                // against an already-signaled fence can complete immediately.
                fence_state
                    .completed_fences
                    .insert(ring, completed_fence.fence_id);
            }

            if signal {
                fence_handler_resources.ctrl_queue.signal_used();
            }
        }
    })
}
350
/// A processed descriptor chain ready to be returned to the guest, together
/// with the number of response bytes written.
pub struct ReturnDescriptor {
    pub desc_chain: DescriptorChain,
    pub len: u32,
}
355
/// Decodes guest virtio-gpu commands and dispatches them to `VirtioGpu`,
/// tracking fence state shared with the fence-completion callback.
pub struct Frontend {
    fence_state: Arc<Mutex<FenceState>>,
    virtio_gpu: VirtioGpu,
}
360
361impl Frontend {
    /// Creates a frontend over `virtio_gpu` using the shared `fence_state`.
    fn new(virtio_gpu: VirtioGpu, fence_state: Arc<Mutex<FenceState>>) -> Frontend {
        Frontend {
            fence_state,
            virtio_gpu,
        }
    }
368
    /// Returns the display owned by the underlying `VirtioGpu`.
    pub fn display(&mut self) -> &Rc<RefCell<GpuDisplay>> {
        self.virtio_gpu.display()
    }
373
    /// Services pending display events; see `VirtioGpu::process_display`.
    pub fn process_display(&mut self) -> ProcessDisplayResult {
        self.virtio_gpu.process_display()
    }
378
379 pub fn process_resource_bridge(&mut self, resource_bridge: &Tube) -> anyhow::Result<()> {
381 let response = match resource_bridge.recv() {
382 Ok(ResourceRequest::GetBuffer { id }) => self.virtio_gpu.export_resource(id),
383 Ok(ResourceRequest::GetFence { seqno }) => self.virtio_gpu.export_fence(seqno),
384 Err(e) => return Err(e).context("Error receiving resource bridge request"),
385 };
386
387 resource_bridge
388 .send(&response)
389 .context("Error sending resource bridge response")?;
390
391 Ok(())
392 }
393
    /// Handles a control command from the VMM (e.g. display hotplug); see
    /// `VirtioGpu::process_gpu_control_command`.
    pub fn process_gpu_control_command(&mut self, cmd: GpuControlCommand) -> GpuControlResult {
        self.virtio_gpu.process_gpu_control_command(cmd)
    }
399
    /// Executes a single decoded virtio-gpu command.
    ///
    /// `reader` is positioned after the command header and is consumed by
    /// commands that carry extra payload (backing entries, in-fence ids,
    /// command buffers).
    fn process_gpu_command(
        &mut self,
        mem: &GuestMemory,
        cmd: GpuCommand,
        reader: &mut Reader,
    ) -> VirtioGpuResult {
        self.virtio_gpu.force_ctx_0();

        match cmd {
            GpuCommand::GetDisplayInfo(_) => Ok(GpuResponse::OkDisplayInfo(
                self.virtio_gpu.display_info().to_vec(),
            )),
            GpuCommand::ResourceCreate2d(info) => {
                let resource_id = info.resource_id.to_native();

                // 2D resources are modeled as 2D textures with fixed
                // target/bind and a single level/layer.
                let resource_create_3d = ResourceCreate3D {
                    target: RUTABAGA_PIPE_TEXTURE_2D,
                    format: info.format.to_native(),
                    bind: RUTABAGA_PIPE_BIND_RENDER_TARGET,
                    width: info.width.to_native(),
                    height: info.height.to_native(),
                    depth: 1,
                    array_size: 1,
                    last_level: 0,
                    nr_samples: 0,
                    flags: 0,
                };

                self.virtio_gpu
                    .resource_create_3d(resource_id, resource_create_3d)
            }
            GpuCommand::ResourceUnref(info) => {
                self.virtio_gpu.unref_resource(info.resource_id.to_native())
            }
            GpuCommand::SetScanout(info) => self.virtio_gpu.set_scanout(
                info.r,
                info.scanout_id.to_native(),
                info.resource_id.to_native(),
                None,
            ),
            GpuCommand::ResourceFlush(info) => {
                self.virtio_gpu.flush_resource(info.resource_id.to_native())
            }
            GpuCommand::TransferToHost2d(info) => {
                let resource_id = info.resource_id.to_native();
                let transfer = Transfer3D::new_2d(
                    info.r.x.to_native(),
                    info.r.y.to_native(),
                    info.r.width.to_native(),
                    info.r.height.to_native(),
                    info.offset.to_native(),
                );
                self.virtio_gpu.transfer_write(0, resource_id, transfer)
            }
            GpuCommand::ResourceAttachBacking(info) => {
                // The (addr, len) backing entries follow the header in the
                // same descriptor chain.
                let available_bytes = reader.available_bytes();
                if available_bytes != 0 {
                    let entry_count = info.nr_entries.to_native() as usize;
                    let mut vecs = Vec::with_capacity(entry_count);
                    for _ in 0..entry_count {
                        match reader.read_obj::<virtio_gpu_mem_entry>() {
                            Ok(entry) => {
                                let addr = GuestAddress(entry.addr.to_native());
                                let len = entry.length.to_native() as usize;
                                vecs.push((addr, len))
                            }
                            Err(_) => return Err(GpuResponse::ErrUnspec),
                        }
                    }
                    self.virtio_gpu
                        .attach_backing(info.resource_id.to_native(), mem, vecs)
                } else {
                    error!("missing data for command {:?}", cmd);
                    Err(GpuResponse::ErrUnspec)
                }
            }
            GpuCommand::ResourceDetachBacking(info) => {
                self.virtio_gpu.detach_backing(info.resource_id.to_native())
            }
            GpuCommand::UpdateCursor(info) => self.virtio_gpu.update_cursor(
                info.resource_id.to_native(),
                info.pos.scanout_id.to_native(),
                info.pos.x.into(),
                info.pos.y.into(),
            ),
            GpuCommand::MoveCursor(info) => self.virtio_gpu.move_cursor(
                info.pos.scanout_id.to_native(),
                info.pos.x.into(),
                info.pos.y.into(),
            ),
            GpuCommand::ResourceAssignUuid(info) => {
                let resource_id = info.resource_id.to_native();
                self.virtio_gpu.resource_assign_uuid(resource_id)
            }
            GpuCommand::GetCapsetInfo(info) => self
                .virtio_gpu
                .get_capset_info(info.capset_index.to_native()),
            GpuCommand::GetCapset(info) => self
                .virtio_gpu
                .get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
            GpuCommand::CtxCreate(info) => {
                // debug_name is advisory; a non-UTF-8 name is simply dropped.
                let context_name: Option<String> = String::from_utf8(info.debug_name.to_vec()).ok();
                self.virtio_gpu.create_context(
                    info.hdr.ctx_id.to_native(),
                    info.context_init.to_native(),
                    context_name.as_deref(),
                )
            }
            GpuCommand::CtxDestroy(info) => {
                self.virtio_gpu.destroy_context(info.hdr.ctx_id.to_native())
            }
            GpuCommand::CtxAttachResource(info) => self
                .virtio_gpu
                .context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::CtxDetachResource(info) => self
                .virtio_gpu
                .context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::ResourceCreate3d(info) => {
                let resource_id = info.resource_id.to_native();
                let resource_create_3d = ResourceCreate3D {
                    target: info.target.to_native(),
                    format: info.format.to_native(),
                    bind: info.bind.to_native(),
                    width: info.width.to_native(),
                    height: info.height.to_native(),
                    depth: info.depth.to_native(),
                    array_size: info.array_size.to_native(),
                    last_level: info.last_level.to_native(),
                    nr_samples: info.nr_samples.to_native(),
                    flags: info.flags.to_native(),
                };

                self.virtio_gpu
                    .resource_create_3d(resource_id, resource_create_3d)
            }
            GpuCommand::TransferToHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let resource_id = info.resource_id.to_native();

                let transfer = Transfer3D {
                    x: info.box_.x.to_native(),
                    y: info.box_.y.to_native(),
                    z: info.box_.z.to_native(),
                    w: info.box_.w.to_native(),
                    h: info.box_.h.to_native(),
                    d: info.box_.d.to_native(),
                    level: info.level.to_native(),
                    stride: info.stride.to_native(),
                    layer_stride: info.layer_stride.to_native(),
                    offset: info.offset.to_native(),
                };

                self.virtio_gpu
                    .transfer_write(ctx_id, resource_id, transfer)
            }
            GpuCommand::TransferFromHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let resource_id = info.resource_id.to_native();

                let transfer = Transfer3D {
                    x: info.box_.x.to_native(),
                    y: info.box_.y.to_native(),
                    z: info.box_.z.to_native(),
                    w: info.box_.w.to_native(),
                    h: info.box_.h.to_native(),
                    d: info.box_.d.to_native(),
                    level: info.level.to_native(),
                    stride: info.stride.to_native(),
                    layer_stride: info.layer_stride.to_native(),
                    offset: info.offset.to_native(),
                };

                self.virtio_gpu
                    .transfer_read(ctx_id, resource_id, transfer, None)
            }
            GpuCommand::CmdSubmit3d(info) => {
                if reader.available_bytes() != 0 {
                    let num_in_fences = info.num_in_fences.to_native() as usize;
                    let cmd_size = info.size.to_native() as usize;
                    let mut cmd_buf = vec![0; cmd_size];
                    let mut fence_ids: Vec<u64> = Vec::with_capacity(num_in_fences);
                    let ctx_id = info.hdr.ctx_id.to_native();

                    // Payload layout: num_in_fences ids (u64 LE each),
                    // followed by the command buffer itself.
                    for _ in 0..num_in_fences {
                        match reader.read_obj::<Le64>() {
                            Ok(fence_id) => {
                                fence_ids.push(fence_id.to_native());
                            }
                            Err(_) => return Err(GpuResponse::ErrUnspec),
                        }
                    }

                    if reader.read_exact(&mut cmd_buf[..]).is_ok() {
                        self.virtio_gpu
                            .submit_command(ctx_id, &mut cmd_buf[..], &fence_ids[..])
                    } else {
                        Err(GpuResponse::ErrInvalidParameter)
                    }
                } else {
                    // An empty submission is accepted as a no-op.
                    Ok(GpuResponse::OkNoData)
                }
            }
            GpuCommand::ResourceCreateBlob(info) => {
                let resource_id = info.resource_id.to_native();
                let ctx_id = info.hdr.ctx_id.to_native();

                let resource_create_blob = ResourceCreateBlob {
                    blob_mem: info.blob_mem.to_native(),
                    blob_flags: info.blob_flags.to_native(),
                    blob_id: info.blob_id.to_native(),
                    size: info.size.to_native(),
                };

                let entry_count = info.nr_entries.to_native();
                // Entries were promised but no payload is present.
                if reader.available_bytes() == 0 && entry_count > 0 {
                    return Err(GpuResponse::ErrUnspec);
                }

                let mut vecs = Vec::with_capacity(entry_count as usize);
                for _ in 0..entry_count {
                    match reader.read_obj::<virtio_gpu_mem_entry>() {
                        Ok(entry) => {
                            let addr = GuestAddress(entry.addr.to_native());
                            let len = entry.length.to_native() as usize;
                            vecs.push((addr, len))
                        }
                        Err(_) => return Err(GpuResponse::ErrUnspec),
                    }
                }

                self.virtio_gpu.resource_create_blob(
                    ctx_id,
                    resource_id,
                    resource_create_blob,
                    vecs,
                    mem,
                )
            }
            GpuCommand::SetScanoutBlob(info) => {
                let scanout_id = info.scanout_id.to_native();
                let resource_id = info.resource_id.to_native();
                let virtio_gpu_format = info.format.to_native();
                let width = info.width.to_native();
                let height = info.height.to_native();
                let mut strides: [u32; 4] = [0; 4];
                let mut offsets: [u32; 4] = [0; 4];

                // Map the virtio-gpu format to its DRM fourcc equivalent;
                // only these three formats are supported for scanout blobs.
                let drm_format = match virtio_gpu_format {
                    VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM => DrmFormat::new(b'X', b'R', b'2', b'4'),
                    VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM => DrmFormat::new(b'A', b'R', b'2', b'4'),
                    VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM => DrmFormat::new(b'R', b'A', b'2', b'4'),
                    _ => {
                        error!("unrecognized virtio-gpu format {}", virtio_gpu_format);
                        return Err(GpuResponse::ErrUnspec);
                    }
                };

                for plane_index in 0..PLANE_INFO_MAX_COUNT {
                    offsets[plane_index] = info.offsets[plane_index].to_native();
                    strides[plane_index] = info.strides[plane_index].to_native();
                }

                let scanout = VirtioScanoutBlobData {
                    width,
                    height,
                    drm_format: drm_format.into(),
                    strides,
                    offsets,
                };

                self.virtio_gpu
                    .set_scanout(info.r, scanout_id, resource_id, Some(scanout))
            }
            GpuCommand::ResourceMapBlob(info) => {
                let resource_id = info.resource_id.to_native();
                let offset = info.offset.to_native();
                self.virtio_gpu
                    .resource_map_blob(resource_id, offset)
                    .inspect_err(|e| {
                        error!(
                            "Failed to map blob, resource id {}, offset {}, error: {:#}",
                            resource_id, offset, e
                        );
                    })
                    // Surface a GpuResponse carried in the error if present;
                    // otherwise fall back to ErrUnspec.
                    .map_err(|e| match e.downcast::<GpuResponse>() {
                        Ok(response) => response,
                        Err(e) => {
                            warn!(
                                "No GPU response specified for {:?}, default to ErrUnspec",
                                e
                            );
                            GpuResponse::ErrUnspec
                        }
                    })
            }
            GpuCommand::ResourceUnmapBlob(info) => {
                let resource_id = info.resource_id.to_native();
                self.virtio_gpu.resource_unmap_blob(resource_id)
            }
            GpuCommand::GetEdid(info) => self.virtio_gpu.get_edid(info.scanout.to_native()),
        }
    }
707
708 pub fn process_queue(&mut self, mem: &GuestMemory, queue: &dyn QueueReader) -> bool {
710 let mut signal_used = false;
711 while let Some(desc) = queue.pop() {
712 if let Some(ret_desc) = self.process_descriptor(mem, desc) {
713 queue.add_used(ret_desc.desc_chain, ret_desc.len);
714 signal_used = true;
715 }
716 }
717
718 signal_used
719 }
720
    /// Decodes, executes, and responds to one descriptor chain.
    ///
    /// Returns `Some` when the descriptor can be returned to the guest now,
    /// or `None` when the command requested a fence that has not yet
    /// completed — in that case the descriptor is parked in `fence_state`
    /// and will be returned by the fence-completion callback.
    fn process_descriptor(
        &mut self,
        mem: &GuestMemory,
        mut desc_chain: DescriptorChain,
    ) -> Option<ReturnDescriptor> {
        let reader = &mut desc_chain.reader;
        let writer = &mut desc_chain.writer;
        let mut resp = Err(GpuResponse::ErrUnspec);
        let mut gpu_cmd = None;
        let mut len = 0;
        match GpuCommand::decode(reader) {
            Ok(cmd) => {
                resp = self.process_gpu_command(mem, cmd, reader);
                gpu_cmd = Some(cmd);
            }
            // Undecodable commands fall through with ErrUnspec.
            Err(e) => debug!("descriptor decode error: {}", e),
        }

        let mut gpu_response = match resp {
            Ok(gpu_response) => gpu_response,
            Err(gpu_response) => {
                if let Some(gpu_cmd) = gpu_cmd {
                    error!(
                        "error processing gpu command {:?}: {:?}",
                        gpu_cmd, gpu_response
                    );
                }
                gpu_response
            }
        };

        // Only write a response if the guest provided a writable buffer.
        if writer.available_bytes() != 0 {
            let mut fence_id = 0;
            let mut ctx_id = 0;
            let mut flags = 0;
            let mut ring_idx = 0;
            if let Some(cmd) = gpu_cmd {
                let ctrl_hdr = cmd.ctrl_hdr();
                if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
                    flags = ctrl_hdr.flags.to_native();
                    fence_id = ctrl_hdr.fence_id.to_native();
                    ctx_id = ctrl_hdr.ctx_id.to_native();
                    ring_idx = ctrl_hdr.ring_idx;

                    let fence = RutabagaFence {
                        flags,
                        fence_id,
                        ctx_id,
                        ring_idx,
                    };
                    // A fence creation failure replaces the command's
                    // response with the fence error.
                    gpu_response = match self.virtio_gpu.create_fence(fence) {
                        Ok(_) => gpu_response,
                        Err(fence_resp) => {
                            warn!("create_fence {} -> {:?}", fence_id, fence_resp);
                            fence_resp
                        }
                    };
                }
            }

            // Write the response into the chain even if the descriptor may
            // be parked below; `len` travels with the parked descriptor.
            match gpu_response.encode(flags, fence_id, ctx_id, ring_idx, writer) {
                Ok(l) => len = l,
                Err(e) => debug!("ctrl queue response encode error: {}", e),
            }

            if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
                let ring = match flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
                    0 => VirtioGpuRing::Global,
                    _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx },
                };

                let mut fence_state = self.fence_state.lock();
                // Park the descriptor unless the fence already completed
                // (possible if the fence callback ran before we got here).
                if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) {
                    fence_state.descs.push(FenceDescriptor {
                        ring,
                        fence_id,
                        desc_chain,
                        len,
                    });

                    return None;
                }
            }
        }
        Some(ReturnDescriptor { desc_chain, len })
    }
813
    /// Forwards a poll notification to the underlying `VirtioGpu`.
    pub fn event_poll(&self) {
        self.virtio_gpu.event_poll();
    }
817}
818
/// Wait-context tokens identifying the event sources the worker polls.
#[derive(EventToken, PartialEq, Eq, Clone, Copy, Debug)]
enum WorkerToken {
    CtrlQueue,
    CursorQueue,
    Display,
    GpuControl,
    Sleep,
    Kill,
    /// One token per registered resource bridge tube.
    ResourceBridge {
        index: usize,
    },
    VirtioGpuPoll,
    #[cfg(windows)]
    DisplayDescriptorRequest,
}
834
/// A `WaitContext` plus a shadow list of registered (descriptor, token)
/// pairs so entries can later be removed by token.
struct EventManager<'a> {
    pub wait_ctx: WaitContext<WorkerToken>,
    events: Vec<(&'a dyn AsRawDescriptor, WorkerToken)>,
}
839
impl<'a> EventManager<'a> {
    /// Creates an empty manager with a fresh wait context.
    pub fn new() -> Result<EventManager<'a>> {
        Ok(EventManager {
            wait_ctx: WaitContext::new()?,
            events: vec![],
        })
    }

    /// Creates a manager pre-registered with the given triggers.
    pub fn build_with(
        triggers: &[(&'a dyn AsRawDescriptor, WorkerToken)],
    ) -> Result<EventManager<'a>> {
        let mut manager = EventManager::new()?;
        manager.wait_ctx.add_many(triggers)?;

        // Mirror the registrations so delete() can find them later.
        for (descriptor, token) in triggers {
            manager.events.push((*descriptor, *token));
        }
        Ok(manager)
    }

    /// Registers one more descriptor with the wait context.
    pub fn add(&mut self, descriptor: &'a dyn AsRawDescriptor, token: WorkerToken) -> Result<()> {
        self.wait_ctx.add(descriptor, token)?;
        self.events.push((descriptor, token));
        Ok(())
    }

    /// Unregisters every descriptor associated with `token`.
    pub fn delete(&mut self, token: WorkerToken) {
        self.events.retain(|event| {
            if event.1 == token {
                // Removal failure is ignored; the entry is dropped from the
                // shadow list regardless.
                self.wait_ctx.delete(event.0).ok();
                return false;
            }
            true
        });
    }
}
876
/// Serialized state of the GPU worker: fence bookkeeping plus the
/// `VirtioGpu` state itself.
#[derive(Serialize, Deserialize)]
struct WorkerSnapshot {
    fence_state_snapshot: FenceStateSnapshot,
    virtio_gpu_snapshot: VirtioGpuSnapshot,
}
882
/// Payload of `WorkerRequest::Activate`: everything the worker needs to
/// start servicing queues.
struct WorkerActivateRequest {
    resources: GpuActivationResources,
}
886
/// Control messages sent from the device to the worker thread.
enum WorkerRequest {
    Activate(WorkerActivateRequest),
    Suspend,
    Snapshot,
    Restore(WorkerSnapshot),
}
893
/// Replies from the worker thread, one per `WorkerRequest` variant.
enum WorkerResponse {
    Ok,
    /// Carries back the queues reclaimed during suspend.
    Suspend(GpuDeactivationResources),
    Snapshot(WorkerSnapshot),
}
899
/// Resources handed to the worker when the device is activated.
struct GpuActivationResources {
    mem: GuestMemory,
    interrupt: Interrupt,
    ctrl_queue: SharedQueueReader,
    cursor_queue: LocalQueueReader,
}
906
/// Queues returned to the device when the worker suspends; `None` when the
/// worker was never activated.
struct GpuDeactivationResources {
    queues: Option<Vec<Queue>>,
}
910
/// The GPU worker thread state: control channels to the device, the command
/// frontend, and the fence state shared with the fence callback.
struct Worker {
    // Request/response channel pair with the device.
    request_receiver: mpsc::Receiver<WorkerRequest>,
    response_sender: mpsc::Sender<anyhow::Result<WorkerResponse>>,
    // Used to request VM exit/reset events.
    exit_evt_wrtube: SendTube,
    gpu_control_tube: Tube,
    resource_bridges: ResourceBridges,
    suspend_evt: Event,
    kill_evt: Event,
    state: Frontend,
    fence_state: Arc<Mutex<FenceState>>,
    // Populated on activate, cleared on suspend; gates the fence callback.
    fence_handler_resources: Arc<Mutex<Option<FenceHandlerActivationResources<SharedQueueReader>>>>,
    #[cfg(windows)]
    gpu_display_wait_descriptor_ctrl_rd: RecvTube,
    activation_resources: Option<GpuActivationResources>,
}
926
/// Why the worker's main processing loop stopped.
#[derive(Copy, Clone)]
enum WorkerStopReason {
    Sleep,
    Kill,
}
932
/// Lifecycle state of the worker (not referenced in this chunk; presumably
/// used by the device-side code elsewhere in the file — verify there).
enum WorkerState {
    Inactive,
    Active,
    Error,
}
938
/// Configures and builds the rutabaga rendering stack from the GPU
/// parameters.
///
/// The virtual display size is taken from the first display's parameters.
fn build_rutabaga(
    gpu_parameters: &GpuParameters,
    display_params: &[GpuDisplayParameters],
    rutabaga_component: RutabagaComponentType,
    rutabaga_paths: Vec<RutabagaPath>,
    rutabaga_server_descriptor: Option<RutabagaDescriptor>,
    fence_handler: RutabagaFenceHandler,
) -> RutabagaResult<Rutabaga> {
    let (display_width, display_height) = display_params[0].get_virtual_display_size();

    // A render server is used either when a descriptor was handed in or when
    // the parameters allow rutabaga to spawn one implicitly.
    let use_render_server =
        rutabaga_server_descriptor.is_some() || gpu_parameters.allow_implicit_render_server_exec;

    let rutabaga_wsi = match gpu_parameters.wsi {
        Some(GpuWsi::Vulkan) => RutabagaWsi::VulkanSwapchain,
        _ => RutabagaWsi::Surfaceless,
    };

    RutabagaBuilder::new(gpu_parameters.capset_mask, fence_handler)
        .set_default_component(rutabaga_component)
        .set_display_width(display_width)
        .set_display_height(display_height)
        .set_rutabaga_paths(Some(rutabaga_paths))
        .set_use_egl(gpu_parameters.renderer_use_egl)
        .set_use_gles(gpu_parameters.renderer_use_gles)
        .set_use_surfaceless(gpu_parameters.renderer_use_surfaceless)
        .set_use_vulkan(gpu_parameters.use_vulkan.unwrap_or_default())
        .set_wsi(rutabaga_wsi)
        .set_use_external_blob(gpu_parameters.external_blob)
        .set_use_system_blob(gpu_parameters.system_blob)
        .set_use_render_server(use_render_server)
        .set_renderer_features(gpu_parameters.renderer_features.clone())
        .set_server_descriptor(rutabaga_server_descriptor)
        .build()
}
977
978impl Worker {
    /// Builds a worker: wires up the fence handler, constructs rutabaga and
    /// the `VirtioGpu` (trying display backends in order), and imports any
    /// event devices.
    ///
    /// Fails if rutabaga construction, display/`VirtioGpu` construction, or
    /// event-device import fails.
    fn new(
        gpu_parameters: GpuParameters,
        rutabaga_paths: Vec<RutabagaPath>,
        rutabaga_component: RutabagaComponentType,
        rutabaga_server_descriptor: Option<RutabagaDescriptor>,
        display_backends: Vec<DisplayBackend>,
        display_params: Vec<GpuDisplayParameters>,
        display_event: Arc<AtomicBool>,
        mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
        event_devices: Vec<EventDevice>,
        external_blob: bool,
        fixed_blob_mapping: bool,
        udmabuf: bool,
        request_receiver: mpsc::Receiver<WorkerRequest>,
        response_sender: mpsc::Sender<anyhow::Result<WorkerResponse>>,
        exit_evt_wrtube: SendTube,
        gpu_control_tube: Tube,
        resource_bridges: ResourceBridges,
        suspend_evt: Event,
        kill_evt: Event,
        #[cfg(windows)] mut wndproc_thread: Option<WindowProcedureThread>,
        #[cfg(windows)] gpu_display_wait_descriptor_ctrl_rd: RecvTube,
        #[cfg(windows)] gpu_display_wait_descriptor_ctrl_wr: SendTube,
        snapshot_scratch_directory: Option<PathBuf>,
    ) -> anyhow::Result<Worker> {
        // The fence handler starts inert; on_activate() fills in its
        // resources once queues and guest memory exist.
        let fence_state = Arc::new(Mutex::new(Default::default()));
        let fence_handler_resources = Arc::new(Mutex::new(None));
        let fence_handler =
            create_fence_handler(fence_handler_resources.clone(), fence_state.clone());

        let rutabaga = build_rutabaga(
            &gpu_parameters,
            &display_params,
            rutabaga_component,
            rutabaga_paths,
            rutabaga_server_descriptor,
            fence_handler,
        )?;

        let mut virtio_gpu = build(
            &display_backends,
            display_params,
            display_event,
            rutabaga,
            mapper,
            external_blob,
            fixed_blob_mapping,
            #[cfg(windows)]
            &mut wndproc_thread,
            udmabuf,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_wr,
            snapshot_scratch_directory,
        )
        .ok_or_else(|| anyhow!("failed to build virtio gpu"))?;

        for event_device in event_devices {
            virtio_gpu
                .import_event_device(event_device)
                .context("failed to import event device")?;
        }

        Ok(Worker {
            request_receiver,
            response_sender,
            exit_evt_wrtube,
            gpu_control_tube,
            resource_bridges,
            suspend_evt,
            kill_evt,
            state: Frontend::new(virtio_gpu, fence_state.clone()),
            fence_state,
            fence_handler_resources,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_rd,
            activation_resources: None,
        })
    }
1058
    /// Worker main loop: services `WorkerRequest`s from the device.
    ///
    /// An `Activate` request additionally enters the queue-processing loop
    /// (`run_until_sleep_or_exit`) and only returns to this loop when asked
    /// to sleep; a kill ends the worker. The loop also exits when the
    /// request channel is dropped.
    fn run(&mut self) {
        loop {
            let request = match self.request_receiver.recv() {
                Ok(r) => r,
                Err(_) => {
                    info!("virtio gpu worker connection ended, exiting.");
                    return;
                }
            };

            match request {
                WorkerRequest::Activate(request) => {
                    // Reply before blocking in the processing loop so the
                    // device is not left waiting on the activate ack.
                    let response = self.on_activate(request).map(|_| WorkerResponse::Ok);
                    self.response_sender
                        .send(response)
                        .expect("failed to send gpu worker response for activate");

                    let stop_reason = self
                        .run_until_sleep_or_exit()
                        .expect("failed to run gpu worker processing");

                    if let WorkerStopReason::Kill = stop_reason {
                        break;
                    }
                }
                WorkerRequest::Suspend => {
                    let response = self.on_suspend().map(WorkerResponse::Suspend);
                    self.response_sender
                        .send(response)
                        .expect("failed to send gpu worker response for suspend");
                }
                WorkerRequest::Snapshot => {
                    let response = self.on_snapshot().map(WorkerResponse::Snapshot);
                    self.response_sender
                        .send(response)
                        .expect("failed to send gpu worker response for snapshot");
                }
                WorkerRequest::Restore(snapshot) => {
                    let response = self.on_restore(snapshot).map(|_| WorkerResponse::Ok);
                    self.response_sender
                        .send(response)
                        .expect("failed to send gpu worker response for restore");
                }
            }
        }
    }
1108
    /// Activates the device: arms the fence handler with guest memory and
    /// the control queue, resumes `VirtioGpu`, and stores the activation
    /// resources for the processing loop.
    fn on_activate(&mut self, request: WorkerActivateRequest) -> anyhow::Result<()> {
        self.fence_handler_resources
            .lock()
            .replace(FenceHandlerActivationResources {
                mem: request.resources.mem.clone(),
                ctrl_queue: request.resources.ctrl_queue.clone(),
            });

        self.state
            .virtio_gpu
            .resume(&request.resources.mem)
            .context("gpu worker failed to activate virtio frontend")?;

        self.activation_resources = Some(request.resources);

        Ok(())
    }
1126
    /// Suspends the device: pauses `VirtioGpu`, disarms the fence handler,
    /// and reclaims queue ownership so the queues can be handed back to the
    /// device (ctrl then cursor order).
    fn on_suspend(&mut self) -> anyhow::Result<GpuDeactivationResources> {
        self.state
            .virtio_gpu
            .suspend()
            .context("failed to suspend VirtioGpu")?;

        // Drop the fence handler's queue reference so the Arc below can be
        // unwrapped.
        self.fence_handler_resources.lock().take();

        let queues = if let Some(activation_resources) = self.activation_resources.take() {
            Some(vec![
                match Arc::try_unwrap(activation_resources.ctrl_queue.queue) {
                    Ok(x) => x.into_inner(),
                    // Any surviving clone would be a bookkeeping bug.
                    Err(_) => panic!("too many refs on ctrl_queue"),
                },
                activation_resources.cursor_queue.queue.into_inner(),
            ])
        } else {
            // Never activated; nothing to return.
            None
        };

        Ok(GpuDeactivationResources { queues })
    }
1149
    /// Captures a snapshot of the fence state and the `VirtioGpu` state.
    fn on_snapshot(&mut self) -> anyhow::Result<WorkerSnapshot> {
        Ok(WorkerSnapshot {
            fence_state_snapshot: self.fence_state.lock().snapshot(),
            virtio_gpu_snapshot: self
                .state
                .virtio_gpu
                .snapshot()
                .context("failed to snapshot VirtioGpu")?,
        })
    }
1160
1161 fn on_restore(&mut self, snapshot: WorkerSnapshot) -> anyhow::Result<()> {
1162 self.fence_state
1163 .lock()
1164 .restore(snapshot.fence_state_snapshot);
1165
1166 self.state
1167 .virtio_gpu
1168 .restore(snapshot.virtio_gpu_snapshot)
1169 .context("failed to restore VirtioGpu")?;
1170
1171 Ok(())
1172 }
1173
    /// Main worker event loop: waits on the queue events, display, control
    /// tube, resource bridges, and suspend/kill events, dispatching each as
    /// it becomes ready. Returns only when asked to sleep or exit.
    fn run_until_sleep_or_exit(&mut self) -> anyhow::Result<WorkerStopReason> {
        // The loop requires an active device; `on_activate` must have run.
        let activation_resources = self
            .activation_resources
            .as_ref()
            .context("virtio gpu worker missing activation resources")?;

        let display_desc =
            SafeDescriptor::try_from(&*self.state.display().borrow() as &dyn AsRawDescriptor)
                .context("failed getting event descriptor for display")?;

        let ctrl_evt = activation_resources
            .ctrl_queue
            .queue
            .lock()
            .event()
            .try_clone()
            .context("failed to clone queue event")?;
        let cursor_evt = activation_resources
            .cursor_queue
            .queue
            .borrow()
            .event()
            .try_clone()
            .context("failed to clone queue event")?;

        let mut event_manager = EventManager::build_with(&[
            (&ctrl_evt, WorkerToken::CtrlQueue),
            (&cursor_evt, WorkerToken::CursorQueue),
            (&display_desc, WorkerToken::Display),
            (
                self.gpu_control_tube.get_read_notifier(),
                WorkerToken::GpuControl,
            ),
            (&self.suspend_evt, WorkerToken::Sleep),
            (&self.kill_evt, WorkerToken::Kill),
            #[cfg(windows)]
            (
                self.gpu_display_wait_descriptor_ctrl_rd.get_read_notifier(),
                WorkerToken::DisplayDescriptorRequest,
            ),
        ])
        .context("failed creating gpu worker WaitContext")?;

        // Declared outside the `if` so the descriptor stays alive while it is
        // registered with the wait context.
        let poll_desc: SafeDescriptor;
        if let Some(desc) = self.state.virtio_gpu.poll_descriptor() {
            poll_desc = desc;
            event_manager
                .add(&poll_desc, WorkerToken::VirtioGpuPoll)
                .context("failed adding poll event to WaitContext")?;
        }

        self.resource_bridges
            .add_to_wait_context(&mut event_manager.wait_ctx);

        loop {
            let events = event_manager
                .wait_ctx
                .wait()
                .context("failed polling for gpu worker events")?;

            // Per-iteration flags; actions are deferred until after all ready
            // events have been collected.
            let mut signal_used_cursor = false;
            let mut signal_used_ctrl = false;
            let mut ctrl_available = false;
            let mut display_available = false;
            let mut needs_config_interrupt = false;

            // Handle hang-ups first: a closed control tube ends the worker;
            // anything else is logged and unregistered.
            for event in events.iter().filter(|e| e.is_hungup) {
                if event.token == WorkerToken::GpuControl {
                    return Ok(WorkerStopReason::Kill);
                }
                error!(
                    "unhandled virtio-gpu worker event hang-up detected: {:?}",
                    event.token
                );
                event_manager.delete(event.token);
            }

            for event in events.iter().filter(|e| e.is_readable) {
                match event.token {
                    WorkerToken::CtrlQueue => {
                        let _ = ctrl_evt.wait();
                        // Defer ctrl queue processing until the display is
                        // handled below.
                        ctrl_available = true;
                    }
                    WorkerToken::CursorQueue => {
                        let _ = cursor_evt.wait();
                        if self.state.process_queue(
                            &activation_resources.mem,
                            &activation_resources.cursor_queue,
                        ) {
                            signal_used_cursor = true;
                        }
                    }
                    WorkerToken::Display => {
                        display_available = true;
                    }
                    #[cfg(windows)]
                    WorkerToken::DisplayDescriptorRequest => {
                        if let Ok(req) = self
                            .gpu_display_wait_descriptor_ctrl_rd
                            .recv::<ModifyWaitContext>()
                        {
                            match req {
                                ModifyWaitContext::Add(desc) => {
                                    if let Err(e) =
                                        event_manager.wait_ctx.add(&desc, WorkerToken::Display)
                                    {
                                        error!(
                                            "failed to add extra descriptor from display \
                                            to GPU worker wait context: {:?}",
                                            e
                                        )
                                    }
                                }
                            }
                        } else {
                            error!("failed to receive ModifyWaitContext request.")
                        }
                    }
                    WorkerToken::GpuControl => {
                        let req = self
                            .gpu_control_tube
                            .recv()
                            .context("failed to recv from gpu control socket")?;
                        let resp = self.state.process_gpu_control_command(req);

                        // Display changes must be surfaced to the guest via a
                        // config interrupt (sent after this loop).
                        if let GpuControlResult::DisplaysUpdated = resp {
                            needs_config_interrupt = true;
                        }

                        self.gpu_control_tube
                            .send(&resp)
                            .context("failed to send gpu control socket response")?;
                    }
                    WorkerToken::ResourceBridge { index } => {
                        self.resource_bridges.set_should_process(index);
                    }
                    WorkerToken::VirtioGpuPoll => {
                        self.state.event_poll();
                    }
                    WorkerToken::Sleep => {
                        return Ok(WorkerStopReason::Sleep);
                    }
                    WorkerToken::Kill => {
                        return Ok(WorkerStopReason::Kill);
                    }
                }
            }

            if display_available {
                match self.state.process_display() {
                    ProcessDisplayResult::CloseRequested => {
                        let _ = self.exit_evt_wrtube.send::<VmEventType>(&VmEventType::Exit);
                    }
                    ProcessDisplayResult::Error(_e) => {
                        base::error!("Display processing failed, disabling display event handler.");
                        event_manager.delete(WorkerToken::Display);
                    }
                    ProcessDisplayResult::Success => (),
                };
            }

            if ctrl_available
                && self
                    .state
                    .process_queue(&activation_resources.mem, &activation_resources.ctrl_queue)
            {
                signal_used_ctrl = true;
            }

            self.resource_bridges
                .process_resource_bridges(&mut self.state, &mut event_manager.wait_ctx);

            if signal_used_ctrl {
                activation_resources.ctrl_queue.signal_used();
            }

            if signal_used_cursor {
                activation_resources.cursor_queue.signal_used();
            }

            if needs_config_interrupt {
                activation_resources.interrupt.signal_config_changed();
            }
        }
    }
1379}
1380
/// Selects which display implementation backs the virtio-gpu scanouts.
#[derive(Clone)]
pub enum DisplayBackend {
    /// Wayland display, with an optional explicit socket path.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    Wayland(Option<PathBuf>),
    /// X11 display, with an optional display string.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    X(Option<String>),
    /// Stub display backend (no real output).
    Stub,
    /// Native Windows (WinAPI) display.
    #[cfg(windows)]
    WinApi,
    /// Android display, identified by a service name.
    #[cfg(feature = "android_display")]
    Android(String),
}
1405
impl DisplayBackend {
    /// Opens a `GpuDisplay` for this backend.
    ///
    /// On Windows, `wndproc_thread` is consumed (taken out of the `Option`)
    /// by the WinAPI backend, and `gpu_display_wait_descriptor_ctrl` is the
    /// channel the display uses to ask the worker to wait on extra
    /// descriptors.
    fn build(
        &self,
        #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>,
        #[cfg(windows)] gpu_display_wait_descriptor_ctrl: SendTube,
    ) -> std::result::Result<GpuDisplay, GpuDisplayError> {
        match self {
            #[cfg(any(target_os = "android", target_os = "linux"))]
            DisplayBackend::Wayland(path) => GpuDisplay::open_wayland(path.as_ref()),
            #[cfg(any(target_os = "android", target_os = "linux"))]
            DisplayBackend::X(display) => GpuDisplay::open_x(display.as_deref()),
            DisplayBackend::Stub => GpuDisplay::open_stub(),
            #[cfg(windows)]
            DisplayBackend::WinApi => match wndproc_thread.take() {
                Some(wndproc_thread) => GpuDisplay::open_winapi(
                    wndproc_thread,
                    None,
                    gpu_display_wait_descriptor_ctrl,
                    None,
                ),
                // Can only happen if `build` is called twice on Windows.
                None => {
                    error!("wndproc_thread is none");
                    Err(GpuDisplayError::Allocate)
                }
            },
            #[cfg(feature = "android_display")]
            DisplayBackend::Android(service_name) => GpuDisplay::open_android(service_name),
        }
    }
}
1436
/// The virtio-gpu device. Most of the actual work happens on a dedicated
/// worker thread; this struct holds configuration plus the channels used to
/// drive that worker.
pub struct Gpu {
    // Tube used to request VM exit (e.g. when the display window is closed).
    exit_evt_wrtube: SendTube,
    // Control tube; `Some` until it is handed off to the worker thread.
    pub gpu_control_tube: Option<Tube>,
    // Shared-memory mapper installed by the bus via `set_shared_memory_mapper`.
    mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
    // Resource bridge tubes; `Some` until handed off to the worker thread.
    resource_bridges: Option<ResourceBridges>,
    // Input event devices; `Some` until consumed by frontend initialization
    // or the worker thread.
    event_devices: Option<Vec<EventDevice>>,
    // Event used to interrupt the worker's wait loop for suspend requests.
    worker_suspend_evt: Option<Event>,
    // Request/response channel pair to the worker thread.
    worker_request_sender: Option<mpsc::Sender<WorkerRequest>>,
    worker_response_receiver: Option<mpsc::Receiver<anyhow::Result<WorkerResponse>>>,
    // Tracks whether the worker is active, inactive, or wedged in an error.
    worker_state: WorkerState,
    worker_thread: Option<WorkerThread<()>>,
    // Display backends to try, in order, when opening the display.
    display_backends: Vec<DisplayBackend>,
    display_params: Vec<GpuDisplayParameters>,
    // Set when a display event is pending; reflected in the device config
    // space and cleared by guest writes to `events_clear`.
    display_event: Arc<AtomicBool>,
    gpu_parameters: GpuParameters,
    rutabaga_paths: Vec<RutabagaPath>,
    pci_address: Option<PciAddress>,
    // Size of the PCI BAR backing the host-visible shared memory region.
    pci_bar_size: u64,
    external_blob: bool,
    fixed_blob_mapping: bool,
    rutabaga_component: RutabagaComponentType,
    #[cfg(windows)]
    wndproc_thread: Option<WindowProcedureThread>,
    base_features: u64,
    udmabuf: bool,
    rutabaga_server_descriptor: Option<SafeDescriptor>,
    // Write end of the channel the display uses to ask the worker to wait on
    // additional descriptors.
    #[cfg(windows)]
    gpu_display_wait_descriptor_ctrl_wr: SendTube,
    #[cfg(windows)]
    gpu_display_wait_descriptor_ctrl_rd: Option<RecvTube>,
    capset_mask: u64,
    #[cfg(any(target_os = "android", target_os = "linux"))]
    gpu_cgroup_path: Option<PathBuf>,
    // Directory for scratch files during snapshotting, if configured.
    snapshot_scratch_directory: Option<PathBuf>,
}
1477
impl Gpu {
    /// Creates a new virtio-gpu device from the given parameters.
    ///
    /// `paths` maps names to rutabaga paths; currently only the empty name is
    /// recognized (treated as the wayland path). The worker thread is not
    /// started here — see `on_device_sandboxed`.
    pub fn new(
        exit_evt_wrtube: SendTube,
        gpu_control_tube: Tube,
        resource_bridges: Vec<Tube>,
        display_backends: Vec<DisplayBackend>,
        gpu_parameters: &GpuParameters,
        rutabaga_server_descriptor: Option<SafeDescriptor>,
        event_devices: Vec<EventDevice>,
        base_features: u64,
        paths: &BTreeMap<String, PathBuf>,
        #[cfg(windows)] wndproc_thread: WindowProcedureThread,
        #[cfg(any(target_os = "android", target_os = "linux"))] gpu_cgroup_path: Option<&PathBuf>,
    ) -> Gpu {
        // Always expose at least one (default) display.
        let mut display_params = gpu_parameters.display_params.clone();
        if display_params.is_empty() {
            display_params.push(Default::default());
        }

        let mut rutabaga_paths: Vec<RutabagaPath> = Vec::new();
        for (name, path) in paths {
            match &name[..] {
                // The empty name is the wayland path.
                "" => rutabaga_paths.push(RutabagaPath {
                    path: path.clone(),
                    path_type: RUTABAGA_PATH_TYPE_WAYLAND,
                }),
                _ => error!("unknown rutabaga path"),
            }
        }

        let component = match gpu_parameters.mode {
            GpuMode::Mode2D => RutabagaComponentType::Rutabaga2D,
            GpuMode::ModeVirglRenderer => RutabagaComponentType::VirglRenderer,
            GpuMode::ModeGfxstream => RutabagaComponentType::Gfxstream,
        };

        #[cfg(windows)]
        let (gpu_display_wait_descriptor_ctrl_wr, gpu_display_wait_descriptor_ctrl_rd) =
            Tube::directional_pair().expect("failed to create wait descriptor control pair.");

        Gpu {
            exit_evt_wrtube,
            gpu_control_tube: Some(gpu_control_tube),
            mapper: Arc::new(Mutex::new(None)),
            resource_bridges: Some(ResourceBridges::new(resource_bridges)),
            event_devices: Some(event_devices),
            worker_request_sender: None,
            worker_response_receiver: None,
            worker_suspend_evt: None,
            worker_state: WorkerState::Inactive,
            worker_thread: None,
            display_backends,
            display_params,
            display_event: Arc::new(AtomicBool::new(false)),
            gpu_parameters: gpu_parameters.clone(),
            rutabaga_paths,
            pci_address: gpu_parameters.pci_address,
            pci_bar_size: gpu_parameters.pci_bar_size,
            external_blob: gpu_parameters.external_blob,
            fixed_blob_mapping: gpu_parameters.fixed_blob_mapping,
            rutabaga_component: component,
            #[cfg(windows)]
            wndproc_thread: Some(wndproc_thread),
            base_features,
            udmabuf: gpu_parameters.udmabuf,
            rutabaga_server_descriptor,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_wr,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_rd: Some(gpu_display_wait_descriptor_ctrl_rd),
            capset_mask: gpu_parameters.capset_mask,
            #[cfg(any(target_os = "android", target_os = "linux"))]
            gpu_cgroup_path: gpu_cgroup_path.cloned(),
            snapshot_scratch_directory: gpu_parameters.snapshot_scratch_path.clone(),
        }
    }

    /// Builds the rutabaga instance and the virtio-gpu frontend, importing
    /// any configured event devices.
    ///
    /// Returns `None` if building rutabaga or the virtio-gpu state fails
    /// (the error is logged).
    ///
    /// Consumes `self.event_devices`; panics if called after they were
    /// already taken.
    pub fn initialize_frontend(
        &mut self,
        fence_state: Arc<Mutex<FenceState>>,
        fence_handler: RutabagaFenceHandler,
        mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
    ) -> Option<Frontend> {
        let rutabaga_server_descriptor = self.rutabaga_server_descriptor.as_ref().map(|d| {
            to_rutabaga_descriptor(d.try_clone().expect("failed to clone server descriptor"))
        });

        let rutabaga = build_rutabaga(
            &self.gpu_parameters,
            &self.display_params,
            self.rutabaga_component,
            self.rutabaga_paths.clone(),
            rutabaga_server_descriptor,
            fence_handler,
        )
        .map_err(|e| error!("failed to build rutabaga {}", e))
        .ok()?;

        let mut virtio_gpu = build(
            &self.display_backends,
            self.display_params.clone(),
            self.display_event.clone(),
            rutabaga,
            mapper,
            self.external_blob,
            self.fixed_blob_mapping,
            #[cfg(windows)]
            &mut self.wndproc_thread,
            self.udmabuf,
            #[cfg(windows)]
            self.gpu_display_wait_descriptor_ctrl_wr
                .try_clone()
                .expect("failed to clone wait context control channel"),
            self.snapshot_scratch_directory.clone(),
        )?;

        for event_device in self.event_devices.take().expect("missing event_devices") {
            virtio_gpu
                .import_event_device(event_device)
                .expect("failed to import event device");
        }

        Some(Frontend::new(virtio_gpu, fence_state))
    }

    /// Spawns the GPU worker thread and wires up the request/response
    /// channels.
    ///
    /// Takes ownership of several one-shot resources (control tube, resource
    /// bridges, event devices); panics if any of them were already consumed.
    /// Blocks until the worker signals that initialization finished.
    fn start_worker_thread(&mut self) {
        let suspend_evt = Event::new().unwrap();
        let suspend_evt_copy = suspend_evt
            .try_clone()
            .context("error cloning suspend event")
            .unwrap();

        let exit_evt_wrtube = self
            .exit_evt_wrtube
            .try_clone()
            .context("error cloning exit tube")
            .unwrap();

        let gpu_control_tube = self
            .gpu_control_tube
            .take()
            .context("gpu_control_tube is none")
            .unwrap();

        let resource_bridges = self
            .resource_bridges
            .take()
            .context("resource_bridges is none")
            .unwrap();

        // Clone/take everything the worker closure needs to own.
        let display_backends = self.display_backends.clone();
        let display_params = self.display_params.clone();
        let display_event = self.display_event.clone();
        let event_devices = self.event_devices.take().expect("missing event_devices");
        let external_blob = self.external_blob;
        let fixed_blob_mapping = self.fixed_blob_mapping;
        let udmabuf = self.udmabuf;
        let snapshot_scratch_directory = self.snapshot_scratch_directory.clone();

        #[cfg(windows)]
        let mut wndproc_thread = self.wndproc_thread.take();

        #[cfg(windows)]
        let gpu_display_wait_descriptor_ctrl_wr = self
            .gpu_display_wait_descriptor_ctrl_wr
            .try_clone()
            .expect("failed to clone wait context ctrl channel");

        #[cfg(windows)]
        let gpu_display_wait_descriptor_ctrl_rd = self
            .gpu_display_wait_descriptor_ctrl_rd
            .take()
            .expect("failed to take gpu_display_wait_descriptor_ctrl_rd");

        #[cfg(any(target_os = "android", target_os = "linux"))]
        let gpu_cgroup_path = self.gpu_cgroup_path.clone();

        let mapper = Arc::clone(&self.mapper);

        let gpu_parameters = self.gpu_parameters.clone();
        let rutabaga_paths = self.rutabaga_paths.clone();
        let rutabaga_component = self.rutabaga_component;
        let rutabaga_server_descriptor = self.rutabaga_server_descriptor.as_ref().map(|d| {
            to_rutabaga_descriptor(d.try_clone().expect("failed to clone server descriptor"))
        });

        // Used to block this function until the worker finishes constructing
        // its state.
        let (init_finished_tx, init_finished_rx) = mpsc::channel();

        let (worker_request_sender, worker_request_receiver) = mpsc::channel();
        let (worker_response_sender, worker_response_receiver) = mpsc::channel();

        let worker_thread = WorkerThread::start("v_gpu", move |kill_evt| {
            #[cfg(any(target_os = "android", target_os = "linux"))]
            if let Some(cgroup_path) = gpu_cgroup_path {
                move_task_to_cgroup(cgroup_path, base::gettid())
                    .expect("Failed to move v_gpu into requested cgroup");
            }

            let mut worker = Worker::new(
                gpu_parameters,
                rutabaga_paths,
                rutabaga_component,
                rutabaga_server_descriptor,
                display_backends,
                display_params,
                display_event,
                mapper,
                event_devices,
                external_blob,
                fixed_blob_mapping,
                udmabuf,
                worker_request_receiver,
                worker_response_sender,
                exit_evt_wrtube,
                gpu_control_tube,
                resource_bridges,
                suspend_evt_copy,
                kill_evt,
                #[cfg(windows)]
                wndproc_thread,
                #[cfg(windows)]
                gpu_display_wait_descriptor_ctrl_rd,
                #[cfg(windows)]
                gpu_display_wait_descriptor_ctrl_wr,
                snapshot_scratch_directory,
            )
            .expect("Failed to create virtio gpu worker thread");

            let _ = init_finished_tx.send(());

            worker.run()
        });

        self.worker_request_sender = Some(worker_request_sender);
        self.worker_response_receiver = Some(worker_response_receiver);
        self.worker_suspend_evt = Some(suspend_evt);
        self.worker_state = WorkerState::Inactive;
        self.worker_thread = Some(worker_thread);

        match init_finished_rx.recv() {
            Ok(()) => {}
            Err(mpsc::RecvError) => panic!("virtio-gpu worker thread init failed"),
        }
    }

    /// Tears down the worker thread and drops the channels/events used to
    /// talk to it.
    fn stop_worker_thread(&mut self) {
        self.worker_request_sender.take();
        self.worker_response_receiver.take();
        self.worker_suspend_evt.take();
        if let Some(worker_thread) = self.worker_thread.take() {
            worker_thread.stop();
        }
    }

    /// Builds the current virtio-gpu config space contents.
    fn get_config(&self) -> virtio_gpu_config {
        let mut events_read = 0;

        if self.display_event.load(Ordering::Relaxed) {
            events_read |= VIRTIO_GPU_EVENT_DISPLAY;
        }

        // Without an explicit capset mask, the advertised capset count
        // depends on the rendering component.
        let num_capsets = match self.capset_mask {
            0 => match self.rutabaga_component {
                RutabagaComponentType::Rutabaga2D => 0,
                RutabagaComponentType::VirglRenderer => 3,
                RutabagaComponentType::Gfxstream => 1,
                _ => unimplemented!(),
            },
            _ => self.capset_mask.count_ones(),
        };

        virtio_gpu_config {
            events_read: Le32::from(events_read),
            events_clear: Le32::from(0),
            num_scanouts: Le32::from(VIRTIO_GPU_MAX_SCANOUTS as u32),
            num_capsets: Le32::from(num_capsets),
        }
    }

    /// Requests a VM exit via the exit event tube.
    pub fn send_exit_evt(&self) -> anyhow::Result<()> {
        self.exit_evt_wrtube
            .send::<VmEventType>(&VmEventType::Exit)
            .context("failed to send exit event")
    }
}
1770
1771impl VirtioDevice for Gpu {
1772 fn keep_rds(&self) -> Vec<RawDescriptor> {
1773 let mut keep_rds = Vec::new();
1774
1775 #[cfg(any(target_os = "android", target_os = "linux"))]
1781 if cfg!(debug_assertions) {
1782 keep_rds.push(libc::STDOUT_FILENO);
1783 keep_rds.push(libc::STDERR_FILENO);
1784 }
1785
1786 if let Some(ref mapper) = *self.mapper.lock() {
1787 if let Some(descriptor) = mapper.as_raw_descriptor() {
1788 keep_rds.push(descriptor);
1789 }
1790 }
1791
1792 if let Some(ref rutabaga_server_descriptor) = self.rutabaga_server_descriptor {
1793 keep_rds.push(rutabaga_server_descriptor.as_raw_descriptor());
1794 }
1795
1796 keep_rds.push(self.exit_evt_wrtube.as_raw_descriptor());
1797
1798 if let Some(gpu_control_tube) = &self.gpu_control_tube {
1799 keep_rds.push(gpu_control_tube.as_raw_descriptor());
1800 }
1801
1802 if let Some(resource_bridges) = &self.resource_bridges {
1803 resource_bridges.append_raw_descriptors(&mut keep_rds);
1804 }
1805
1806 for event_device in self.event_devices.iter().flatten() {
1807 keep_rds.push(event_device.as_raw_descriptor());
1808 }
1809
1810 keep_rds
1811 }
1812
1813 fn device_type(&self) -> DeviceType {
1814 DeviceType::Gpu
1815 }
1816
1817 fn queue_max_sizes(&self) -> &[u16] {
1818 QUEUE_SIZES
1819 }
1820
1821 fn features(&self) -> u64 {
1822 let mut virtio_gpu_features = 1 << VIRTIO_GPU_F_EDID | 1 << VIRTIO_GPU_F_RESOURCE_BLOB;
1823
1824 if self.rutabaga_component != RutabagaComponentType::Rutabaga2D || self.capset_mask != 0 {
1827 virtio_gpu_features |= 1 << VIRTIO_GPU_F_VIRGL
1828 | 1 << VIRTIO_GPU_F_RESOURCE_UUID
1829 | 1 << VIRTIO_GPU_F_CONTEXT_INIT;
1830
1831 if self.udmabuf {
1832 virtio_gpu_features |= 1 << VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
1833 }
1834
1835 virtio_gpu_features |= 1 << VIRTIO_GPU_F_FENCE_PASSING;
1838 }
1839
1840 self.base_features | virtio_gpu_features
1841 }
1842
1843 fn ack_features(&mut self, value: u64) {
1844 let _ = value;
1845 }
1846
1847 fn read_config(&self, offset: u64, data: &mut [u8]) {
1848 copy_config(data, 0, self.get_config().as_bytes(), offset);
1849 }
1850
1851 fn write_config(&mut self, offset: u64, data: &[u8]) {
1852 let mut cfg = self.get_config();
1853 copy_config(cfg.as_mut_bytes(), offset, data, 0);
1854 if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
1855 self.display_event.store(false, Ordering::Relaxed);
1856 }
1857 }
1858
1859 fn on_device_sandboxed(&mut self) {
1860 self.start_worker_thread();
1870 }
1871
1872 fn activate(
1873 &mut self,
1874 mem: GuestMemory,
1875 interrupt: Interrupt,
1876 mut queues: BTreeMap<usize, Queue>,
1877 ) -> anyhow::Result<()> {
1878 if queues.len() != QUEUE_SIZES.len() {
1879 return Err(anyhow!(
1880 "expected {} queues, got {}",
1881 QUEUE_SIZES.len(),
1882 queues.len()
1883 ));
1884 }
1885
1886 let ctrl_queue = SharedQueueReader::new(queues.remove(&0).unwrap());
1887 let cursor_queue = LocalQueueReader::new(queues.remove(&1).unwrap());
1888
1889 self.worker_request_sender
1890 .as_ref()
1891 .context("worker thread missing on activate?")?
1892 .send(WorkerRequest::Activate(WorkerActivateRequest {
1893 resources: GpuActivationResources {
1894 mem,
1895 interrupt,
1896 ctrl_queue,
1897 cursor_queue,
1898 },
1899 }))
1900 .map_err(|e| anyhow!("failed to send virtio gpu worker activate request: {:?}", e))?;
1901
1902 self.worker_response_receiver
1903 .as_ref()
1904 .context("worker thread missing on activate?")?
1905 .recv()
1906 .inspect(|_| self.worker_state = WorkerState::Active)
1907 .inspect_err(|_| self.worker_state = WorkerState::Error)
1908 .context("failed to receive response for virtio gpu worker resume request")??;
1909
1910 Ok(())
1911 }
1912
1913 fn pci_address(&self) -> Option<PciAddress> {
1914 self.pci_address
1915 }
1916
1917 fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
1918 Some(SharedMemoryRegion {
1919 id: VIRTIO_GPU_SHM_ID_HOST_VISIBLE,
1920 length: self.pci_bar_size,
1921 })
1922 }
1923
1924 fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
1925 self.mapper.lock().replace(mapper);
1926 }
1927
1928 fn expose_shmem_descriptors_with_viommu(&self) -> bool {
1929 !self.fixed_blob_mapping
1931 }
1932
1933 fn get_shared_memory_prepare_type(&mut self) -> SharedMemoryPrepareType {
1934 if self.fixed_blob_mapping {
1935 let cache_type = if cfg!(feature = "noncoherent-dma") {
1936 MemCacheType::CacheNonCoherent
1937 } else {
1938 MemCacheType::CacheCoherent
1939 };
1940 SharedMemoryPrepareType::SingleMappingOnFirst(cache_type)
1941 } else {
1942 SharedMemoryPrepareType::DynamicPerMapping
1943 }
1944 }
1945
1946 fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
1956 match self.worker_state {
1957 WorkerState::Error => {
1958 return Err(anyhow!(
1959 "failed to sleep virtio gpu worker which is in error state"
1960 ));
1961 }
1962 WorkerState::Inactive => {
1963 return Ok(None);
1964 }
1965 _ => (),
1966 };
1967
1968 if let (
1969 Some(worker_request_sender),
1970 Some(worker_response_receiver),
1971 Some(worker_suspend_evt),
1972 ) = (
1973 &self.worker_request_sender,
1974 &self.worker_response_receiver,
1975 &self.worker_suspend_evt,
1976 ) {
1977 worker_request_sender
1978 .send(WorkerRequest::Suspend)
1979 .map_err(|e| {
1980 anyhow!(
1981 "failed to send suspend request to virtio gpu worker: {:?}",
1982 e
1983 )
1984 })?;
1985
1986 worker_suspend_evt
1987 .signal()
1988 .context("failed to signal virtio gpu worker suspend event")?;
1989
1990 let response = worker_response_receiver
1991 .recv()
1992 .inspect(|_| self.worker_state = WorkerState::Inactive)
1993 .inspect_err(|_| self.worker_state = WorkerState::Error)
1994 .context("failed to receive response for virtio gpu worker suspend request")??;
1995
1996 worker_suspend_evt
1997 .reset()
1998 .context("failed to reset virtio gpu worker suspend event")?;
1999
2000 match response {
2001 WorkerResponse::Suspend(deactivation_resources) => Ok(deactivation_resources
2002 .queues
2003 .map(|q| q.into_iter().enumerate().collect())),
2004 _ => {
2005 panic!("unexpected response from virtio gpu worker sleep request");
2006 }
2007 }
2008 } else {
2009 Err(anyhow!("virtio gpu worker not available for sleep"))
2010 }
2011 }
2012
2013 fn virtio_wake(
2014 &mut self,
2015 queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
2016 ) -> anyhow::Result<()> {
2017 match self.worker_state {
2018 WorkerState::Error => {
2019 return Err(anyhow!(
2020 "failed to wake virtio gpu worker which is in error state"
2021 ));
2022 }
2023 WorkerState::Active => {
2024 return Ok(());
2025 }
2026 _ => (),
2027 };
2028
2029 match queues_state {
2030 None => Ok(()),
2031 Some((mem, interrupt, queues)) => {
2032 self.activate(mem, interrupt, queues)?;
2036 Ok(())
2037 }
2038 }
2039 }
2040
2041 fn virtio_snapshot(&mut self) -> anyhow::Result<AnySnapshot> {
2042 match self.worker_state {
2043 WorkerState::Error => {
2044 return Err(anyhow!(
2045 "failed to snapshot virtio gpu worker which is in error state"
2046 ));
2047 }
2048 WorkerState::Active => {
2049 return Err(anyhow!(
2050 "failed to snapshot virtio gpu worker which is in active state"
2051 ));
2052 }
2053 _ => (),
2054 };
2055
2056 if let (Some(worker_request_sender), Some(worker_response_receiver)) =
2057 (&self.worker_request_sender, &self.worker_response_receiver)
2058 {
2059 worker_request_sender
2060 .send(WorkerRequest::Snapshot)
2061 .map_err(|e| {
2062 anyhow!(
2063 "failed to send snapshot request to virtio gpu worker: {:?}",
2064 e
2065 )
2066 })?;
2067
2068 match worker_response_receiver
2069 .recv()
2070 .inspect_err(|_| self.worker_state = WorkerState::Error)
2071 .context("failed to receive response for virtio gpu worker suspend request")??
2072 {
2073 WorkerResponse::Snapshot(snapshot) => Ok(AnySnapshot::to_any(snapshot)?),
2074 _ => {
2075 panic!("unexpected response from virtio gpu worker sleep request");
2076 }
2077 }
2078 } else {
2079 Err(anyhow!("virtio gpu worker not available for snapshot"))
2080 }
2081 }
2082
2083 fn virtio_restore(&mut self, data: AnySnapshot) -> anyhow::Result<()> {
2084 match self.worker_state {
2085 WorkerState::Error => {
2086 return Err(anyhow!(
2087 "failed to restore virtio gpu worker which is in error state"
2088 ));
2089 }
2090 WorkerState::Active => {
2091 return Err(anyhow!(
2092 "failed to restore virtio gpu worker which is in active state"
2093 ));
2094 }
2095 _ => (),
2096 };
2097
2098 let snapshot: WorkerSnapshot = AnySnapshot::from_any(data)?;
2099
2100 if let (Some(worker_request_sender), Some(worker_response_receiver)) =
2101 (&self.worker_request_sender, &self.worker_response_receiver)
2102 {
2103 worker_request_sender
2104 .send(WorkerRequest::Restore(snapshot))
2105 .map_err(|e| {
2106 anyhow!(
2107 "failed to send suspend request to virtio gpu worker: {:?}",
2108 e
2109 )
2110 })?;
2111
2112 let response = worker_response_receiver
2113 .recv()
2114 .inspect_err(|_| self.worker_state = WorkerState::Error)
2115 .context("failed to receive response for virtio gpu worker suspend request")??;
2116
2117 match response {
2118 WorkerResponse::Ok => Ok(()),
2119 _ => {
2120 panic!("unexpected response from virtio gpu worker sleep request");
2121 }
2122 }
2123 } else {
2124 Err(anyhow!("virtio gpu worker not available for restore"))
2125 }
2126 }
2127
2128 fn reset(&mut self) -> anyhow::Result<()> {
2129 self.stop_worker_thread();
2130 Ok(())
2131 }
2132}
2133
impl Drop for Gpu {
    fn drop(&mut self) {
        // Best-effort shutdown of the worker thread; errors during drop are
        // ignored.
        let _ = self.reset();
    }
}
2139
/// A set of resource bridge tubes, with per-tube bookkeeping of which ones
/// are pending processing.
struct ResourceBridges {
    resource_bridges: Vec<Tube>,
    // Parallel to `resource_bridges`: true at index `i` means the bridge at
    // `i` signalled readiness and should be serviced on the next pass.
    should_process: Vec<bool>,
}
2145
2146impl ResourceBridges {
2147 pub fn new(resource_bridges: Vec<Tube>) -> Self {
2148 #[cfg(windows)]
2149 assert!(
2150 resource_bridges.is_empty(),
2151 "resource bridges are not supported on Windows"
2152 );
2153
2154 let mut resource_bridges = Self {
2155 resource_bridges,
2156 should_process: Default::default(),
2157 };
2158 resource_bridges.reset_should_process();
2159 resource_bridges
2160 }
2161
2162 pub fn append_raw_descriptors(&self, rds: &mut Vec<RawDescriptor>) {
2164 for bridge in &self.resource_bridges {
2165 rds.push(bridge.as_raw_descriptor());
2166 }
2167 }
2168
2169 pub fn add_to_wait_context(&self, wait_ctx: &mut WaitContext<WorkerToken>) {
2171 for (index, bridge) in self.resource_bridges.iter().enumerate() {
2172 if let Err(e) = wait_ctx.add(bridge, WorkerToken::ResourceBridge { index }) {
2173 error!("failed to add resource bridge to WaitContext: {}", e);
2174 }
2175 }
2176 }
2177
2178 pub fn set_should_process(&mut self, index: usize) {
2181 self.should_process[index] = true;
2182 }
2183
2184 pub fn process_resource_bridges(
2187 &mut self,
2188 state: &mut Frontend,
2189 wait_ctx: &mut WaitContext<WorkerToken>,
2190 ) {
2191 for (bridge, &should_process) in self.resource_bridges.iter().zip(&self.should_process) {
2192 if should_process {
2193 if let Err(e) = state.process_resource_bridge(bridge) {
2194 error!("Failed to process resource bridge: {:#}", e);
2195 error!("Removing that resource bridge from the wait context.");
2196 wait_ctx.delete(bridge).unwrap_or_else(|e| {
2197 error!("Failed to remove faulty resource bridge: {:#}", e)
2198 });
2199 }
2200 }
2201 }
2202 self.reset_should_process();
2203 }
2204
2205 fn reset_should_process(&mut self) {
2206 self.should_process.clear();
2207 self.should_process
2208 .resize(self.resource_bridges.len(), false);
2209 }
2210}