devices/virtio/
wl.rs

1// Copyright 2017 The ChromiumOS Authors
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
//! This module implements the virtio wayland device used by the guest to access the host's wayland server.
6//!
7//! The virtio wayland protocol is done over two queues: `in` and `out`. The `in` queue is used for
8//! sending commands to the guest that are generated by the host, usually messages from the wayland
9//! server. The `out` queue is for commands from the guest, usually requests to allocate shared
10//! memory, open a wayland server connection, or send data over an existing connection.
11//!
12//! Each `WlVfd` represents one virtual file descriptor created by either the guest or the host.
13//! Virtual file descriptors contain actual file descriptors, either a shared memory file descriptor
14//! or a unix domain socket to the wayland server. In the shared memory case, there is also an
15//! associated slot that indicates which hypervisor memory slot the memory is installed into, as
16//! well as a page frame number that the guest can access the memory from.
17//!
18//! The types starting with `Ctrl` are structures representing the virtio wayland protocol "on the
19//! wire." They are decoded and executed in the `execute` function and encoded as some variant of
20//! `WlResp` for responses.
21//!
22//! There is one `WlState` instance that contains every known vfd and the current state of `in`
23//! queue. The `in` queue requires extra state to buffer messages to the guest in case the `in`
24//! queue is already full. The `WlState` also has a control socket necessary to fulfill certain
25//! requests, such as those registering guest memory.
26//!
27//! The `Worker` is responsible for the poll loop over all possible events, encoding/decoding from
28//! the virtio queue, and routing messages in and out of `WlState`. Possible events include the kill
29//! event, available descriptors on the `in` or `out` queue, and incoming data on any vfd's socket.
30
31use std::cell::RefCell;
32use std::collections::btree_map::Entry;
33use std::collections::BTreeMap;
34use std::collections::BTreeSet;
35use std::collections::VecDeque;
36use std::convert::From;
37use std::error::Error as StdError;
38use std::fmt;
39use std::fs::File;
40use std::io;
41use std::io::Read;
42use std::io::Seek;
43use std::io::SeekFrom;
44use std::io::Write;
45use std::mem::size_of;
46#[cfg(feature = "gbm")]
47use std::os::raw::c_uint;
48#[cfg(feature = "gbm")]
49use std::os::raw::c_ulonglong;
50use std::os::unix::net::UnixStream;
51use std::path::Path;
52use std::path::PathBuf;
53use std::rc::Rc;
54use std::result;
55use std::time::Duration;
56
57use anyhow::anyhow;
58use anyhow::Context;
59use base::error;
60#[cfg(feature = "gbm")]
61use base::ioctl_iow_nr;
62use base::ioctl_iowr_nr;
63use base::ioctl_with_ref;
64use base::linux::SharedMemoryLinux;
65use base::pagesize;
66use base::pipe;
67use base::round_up_to_page_size;
68use base::unix::FileFlags;
69use base::warn;
70use base::AsRawDescriptor;
71use base::Error;
72use base::Event;
73use base::EventToken;
74use base::EventType;
75#[cfg(feature = "gpu")]
76use base::IntoRawDescriptor;
77#[cfg(feature = "gbm")]
78use base::MemoryMappingBuilder;
79#[cfg(feature = "gbm")]
80use base::MmapError;
81use base::Protection;
82use base::RawDescriptor;
83use base::Result;
84use base::SafeDescriptor;
85use base::ScmSocket;
86use base::SharedMemory;
87use base::Tube;
88use base::TubeError;
89use base::VolatileMemoryError;
90use base::WaitContext;
91use base::WorkerThread;
92use data_model::Le32;
93use data_model::Le64;
94use hypervisor::MemCacheType;
95#[cfg(feature = "gbm")]
96use libc::EBADF;
97#[cfg(feature = "gbm")]
98use libc::EINVAL;
99#[cfg(feature = "gbm")]
100use libc::ENOSYS;
101use remain::sorted;
102use resources::address_allocator::AddressAllocator;
103use resources::AddressRange;
104use resources::Alloc;
105#[cfg(feature = "gbm")]
106use rutabaga_gfx::DrmFormat;
107#[cfg(feature = "gbm")]
108use rutabaga_gfx::ImageAllocationInfo;
109#[cfg(feature = "gbm")]
110use rutabaga_gfx::ImageMemoryRequirements;
111#[cfg(feature = "gbm")]
112use rutabaga_gfx::RutabagaDescriptor;
113#[cfg(feature = "gbm")]
114use rutabaga_gfx::RutabagaError;
115#[cfg(feature = "gbm")]
116use rutabaga_gfx::RutabagaGralloc;
117#[cfg(feature = "gbm")]
118use rutabaga_gfx::RutabagaGrallocBackendFlags;
119#[cfg(feature = "gbm")]
120use rutabaga_gfx::RutabagaGrallocFlags;
121#[cfg(feature = "gbm")]
122use rutabaga_gfx::RutabagaIntoRawDescriptor;
123#[cfg(feature = "gbm")]
124use rutabaga_gfx::RUTABAGA_MAP_CACHE_CACHED;
125#[cfg(feature = "gbm")]
126use rutabaga_gfx::RUTABAGA_MAP_CACHE_MASK;
127use static_assertions::const_assert_eq;
128use thiserror::Error as ThisError;
129use vm_control::VmMemorySource;
130use vm_memory::GuestMemory;
131use vm_memory::GuestMemoryError;
132use zerocopy::FromBytes;
133use zerocopy::Immutable;
134use zerocopy::IntoBytes;
135use zerocopy::KnownLayout;
136
137#[cfg(feature = "gpu")]
138use super::resource_bridge::get_resource_info;
139#[cfg(feature = "gpu")]
140use super::resource_bridge::BufferInfo;
141#[cfg(feature = "gpu")]
142use super::resource_bridge::ResourceBridgeError;
143#[cfg(feature = "gpu")]
144use super::resource_bridge::ResourceInfo;
145#[cfg(feature = "gpu")]
146use super::resource_bridge::ResourceRequest;
147use super::DeviceType;
148use super::Interrupt;
149use super::Queue;
150use super::Reader;
151use super::SharedMemoryMapper;
152use super::SharedMemoryRegion;
153use super::VirtioDevice;
154use super::Writer;
155use crate::virtio::device_constants::wl::VIRTIO_WL_F_SEND_FENCES;
156use crate::virtio::device_constants::wl::VIRTIO_WL_F_TRANS_FLAGS;
157use crate::virtio::device_constants::wl::VIRTIO_WL_F_USE_SHMEM;
158
// Virtqueue sizes for the `in` and `out` queues.
const QUEUE_SIZE: u16 = 256;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];

// Maximum number of vfd ids that may accompany a single send/recv message.
const VIRTWL_SEND_MAX_ALLOCS: usize = 28;
// Command codes of the virtio-wl wire protocol (guest- or host-initiated).
const VIRTIO_WL_CMD_VFD_NEW: u32 = 256;
const VIRTIO_WL_CMD_VFD_CLOSE: u32 = 257;
const VIRTIO_WL_CMD_VFD_SEND: u32 = 258;
const VIRTIO_WL_CMD_VFD_RECV: u32 = 259;
const VIRTIO_WL_CMD_VFD_NEW_CTX: u32 = 260;
const VIRTIO_WL_CMD_VFD_NEW_PIPE: u32 = 261;
const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
#[cfg(feature = "gbm")]
const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
#[cfg(feature = "gbm")]
const VIRTIO_WL_CMD_VFD_DMABUF_SYNC: u32 = 264;
#[cfg(feature = "gpu")]
const VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: u32 = 265;
const VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED: u32 = 266;
// Response codes; the VIRTIO_WL_RESP_ERR-and-above block signals failure.
const VIRTIO_WL_RESP_OK: u32 = 4096;
const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
#[cfg(feature = "gbm")]
const VIRTIO_WL_RESP_VFD_NEW_DMABUF: u32 = 4098;
const VIRTIO_WL_RESP_ERR: u32 = 4352;
const VIRTIO_WL_RESP_OUT_OF_MEMORY: u32 = 4353;
const VIRTIO_WL_RESP_INVALID_ID: u32 = 4354;
const VIRTIO_WL_RESP_INVALID_TYPE: u32 = 4355;
const VIRTIO_WL_RESP_INVALID_FLAGS: u32 = 4356;
const VIRTIO_WL_RESP_INVALID_CMD: u32 = 4357;
// Per-vfd capability flags.
const VIRTIO_WL_VFD_WRITE: u32 = 0x1;
const VIRTIO_WL_VFD_READ: u32 = 0x2;
// NOTE(review): same value as VIRTIO_WL_VFD_READ — confirm this overlap is intentional in the
// protocol (the two flags are used in different message contexts).
const VIRTIO_WL_VFD_MAP: u32 = 0x2;
const VIRTIO_WL_VFD_CONTROL: u32 = 0x4;
const VIRTIO_WL_VFD_FENCE: u32 = 0x8;

// Ids at or above this base are host-created; guest-created ids are below it.
const NEXT_VFD_ID_BASE: u32 = 0x40000000;
const VFD_ID_HOST_MASK: u32 = NEXT_VFD_ID_BASE;
// Each in-vq buffer is one page, so we need to leave space for the control header and the maximum
// number of allocs.
const IN_BUFFER_LEN: usize =
    0x1000 - size_of::<CtrlVfdRecv>() - VIRTWL_SEND_MAX_ALLOCS * size_of::<Le32>();

#[cfg(feature = "gbm")]
const VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK: u32 = 0x7;

// dma-buf uapi ioctl constants used for DMA_BUF_IOCTL_SYNC.
#[cfg(feature = "gbm")]
const DMA_BUF_IOCTL_BASE: c_uint = 0x62;
#[cfg(feature = "gbm")]
const DMA_BUF_SYNC_WRITE: c_uint = 0x2;
#[cfg(feature = "gbm")]
const DMA_BUF_SYNC_END: c_uint = 0x4;
209
// Argument struct of the DMA_BUF_IOCTL_SYNC ioctl (mirrors the kernel's `struct dma_buf_sync`).
#[cfg(feature = "gbm")]
#[repr(C)]
#[derive(Copy, Clone)]
struct dma_buf_sync {
    flags: c_ulonglong,
}

#[cfg(feature = "gbm")]
ioctl_iow_nr!(DMA_BUF_IOCTL_SYNC, DMA_BUF_IOCTL_BASE, 0, dma_buf_sync);
219
// Result struct of the SYNC_IOC_FILE_INFO ioctl (mirrors the kernel's `struct sync_file_info`);
// used only to probe whether a descriptor is a sync_file fence.
#[repr(C)]
#[derive(Copy, Clone, Default)]
struct sync_file_info {
    name: [u8; 32],
    status: i32,
    flags: u32,
    num_fences: u32,
    pad: u32,
    sync_fence_info: u64,
}

ioctl_iowr_nr!(SYNC_IOC_FILE_INFO, 0x3e, 4, sync_file_info);
232
233fn is_fence(f: &File) -> bool {
234    let info = sync_file_info::default();
235    // SAFETY:
236    // Safe as f is a valid file
237    unsafe { ioctl_with_ref(f, SYNC_IOC_FILE_INFO, &info) == 0 }
238}
239
// Stride and byte offset of a single image plane inside a GPU buffer allocation.
#[cfg(feature = "gbm")]
#[derive(Debug, Default)]
struct GpuMemoryPlaneDesc {
    stride: u32,
    offset: u32,
}
246
// Plane layout of a GPU buffer; up to three planes are reported to the guest.
#[cfg(feature = "gbm")]
#[derive(Debug, Default)]
struct GpuMemoryDesc {
    planes: [GpuMemoryPlaneDesc; 3],
}
252
// Kinds of vfds that can accompany a send message (the `kind` field of
// CtrlVfdSendVfd / CtrlVfdSendVfdV2).
const VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL: u32 = 0;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU: u32 = 1;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE: u32 = 2;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE: u32 = 3;

// Shift between a page frame number and a byte address (4 KiB pages).
const VIRTIO_WL_PFN_SHIFT: u32 = 12;
259
260fn encode_vfd_new(
261    writer: &mut Writer,
262    resp: bool,
263    vfd_id: u32,
264    flags: u32,
265    pfn: u64,
266    size: u32,
267) -> WlResult<()> {
268    let ctrl_vfd_new = CtrlVfdNew {
269        hdr: CtrlHeader {
270            type_: Le32::from(if resp {
271                VIRTIO_WL_RESP_VFD_NEW
272            } else {
273                VIRTIO_WL_CMD_VFD_NEW
274            }),
275            flags: Le32::from(0),
276        },
277        id: Le32::from(vfd_id),
278        flags: Le32::from(flags),
279        pfn: Le64::from(pfn),
280        size: Le32::from(size),
281        padding: Default::default(),
282    };
283
284    writer
285        .write_obj(ctrl_vfd_new)
286        .map_err(WlError::WriteResponse)
287}
288
/// Encodes a VIRTIO_WL_RESP_VFD_NEW_DMABUF response into `writer`, reporting the plane
/// strides/offsets from `desc`; width/height/format are always sent as zero.
#[cfg(feature = "gbm")]
fn encode_vfd_new_dmabuf(
    writer: &mut Writer,
    vfd_id: u32,
    flags: u32,
    pfn: u64,
    size: u32,
    desc: GpuMemoryDesc,
) -> WlResult<()> {
    let [plane0, plane1, plane2] = &desc.planes;
    let msg = CtrlVfdNewDmabuf {
        hdr: CtrlHeader {
            type_: Le32::from(VIRTIO_WL_RESP_VFD_NEW_DMABUF),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
        flags: Le32::from(flags),
        pfn: Le64::from(pfn),
        size: Le32::from(size),
        width: Le32::from(0),
        height: Le32::from(0),
        format: Le32::from(0),
        stride0: Le32::from(plane0.stride),
        stride1: Le32::from(plane1.stride),
        stride2: Le32::from(plane2.stride),
        offset0: Le32::from(plane0.offset),
        offset1: Le32::from(plane1.offset),
        offset2: Le32::from(plane2.offset),
    };

    writer.write_obj(msg).map_err(WlError::WriteResponse)
}
322
323fn encode_vfd_recv(writer: &mut Writer, vfd_id: u32, data: &[u8], vfd_ids: &[u32]) -> WlResult<()> {
324    let ctrl_vfd_recv = CtrlVfdRecv {
325        hdr: CtrlHeader {
326            type_: Le32::from(VIRTIO_WL_CMD_VFD_RECV),
327            flags: Le32::from(0),
328        },
329        id: Le32::from(vfd_id),
330        vfd_count: Le32::from(vfd_ids.len() as u32),
331    };
332    writer
333        .write_obj(ctrl_vfd_recv)
334        .map_err(WlError::WriteResponse)?;
335
336    for &recv_vfd_id in vfd_ids.iter() {
337        writer
338            .write_obj(Le32::from(recv_vfd_id))
339            .map_err(WlError::WriteResponse)?;
340    }
341
342    writer.write_all(data).map_err(WlError::WriteResponse)
343}
344
345fn encode_vfd_hup(writer: &mut Writer, vfd_id: u32) -> WlResult<()> {
346    let ctrl_vfd_new = CtrlVfd {
347        hdr: CtrlHeader {
348            type_: Le32::from(VIRTIO_WL_CMD_VFD_HUP),
349            flags: Le32::from(0),
350        },
351        id: Le32::from(vfd_id),
352    };
353
354    writer
355        .write_obj(ctrl_vfd_new)
356        .map_err(WlError::WriteResponse)
357}
358
/// Encodes `resp` into `writer` in the virtio-wl wire format.
///
/// Variants carrying a payload dispatch to their dedicated encoder; every other variant is
/// written as its bare 32-bit code from `get_code`.
fn encode_resp(writer: &mut Writer, resp: WlResp) -> WlResult<()> {
    match resp {
        WlResp::VfdNew {
            id,
            flags,
            pfn,
            size,
            resp,
        } => encode_vfd_new(writer, resp, id, flags, pfn, size),
        #[cfg(feature = "gbm")]
        WlResp::VfdNewDmabuf {
            id,
            flags,
            pfn,
            size,
            desc,
        } => encode_vfd_new_dmabuf(writer, id, flags, pfn, size, desc),
        WlResp::VfdRecv { id, data, vfds } => encode_vfd_recv(writer, id, data, vfds),
        WlResp::VfdHup { id } => encode_vfd_hup(writer, id),
        r => writer
            .write_obj(Le32::from(r.get_code()))
            .map_err(WlError::WriteResponse),
    }
}
383
384#[allow(dead_code)]
385#[sorted]
386#[derive(ThisError, Debug)]
387enum WlError {
388    #[error("overflow in calculation")]
389    CheckedOffset,
390    #[error("failed to synchronize DMABuf access: {0}")]
391    DmabufSync(io::Error),
392    #[error("failed to create shared memory from descriptor: {0}")]
393    FromSharedMemory(Error),
394    #[error("failed to get seals: {0}")]
395    GetSeals(Error),
396    #[error("gralloc error: {0}")]
397    #[cfg(feature = "gbm")]
398    GrallocError(#[from] RutabagaError),
399    #[error("access violation in guest memory: {0}")]
400    GuestMemory(#[from] GuestMemoryError),
401    #[error("invalid string: {0}")]
402    InvalidString(std::str::Utf8Error),
403    #[error("failed to create shared memory allocation: {0}")]
404    NewAlloc(Error),
405    #[error("failed to create pipe: {0}")]
406    NewPipe(Error),
407    #[error("error parsing descriptor: {0}")]
408    ParseDesc(io::Error),
409    #[error("failed to read a pipe: {0}")]
410    ReadPipe(io::Error),
411    #[error("failed to recv on a socket: {0}")]
412    RecvVfd(io::Error),
413    #[error("failed to send on a socket: {0}")]
414    SendVfd(io::Error),
415    #[error("shmem mapper failure: {0}")]
416    ShmemMapperError(anyhow::Error),
417    #[error("failed to connect socket: {0}")]
418    SocketConnect(io::Error),
419    #[error("failed to set socket as non-blocking: {0}")]
420    SocketNonBlock(io::Error),
421    #[error("unknown socket name: {0}")]
422    UnknownSocketName(String),
423    #[error("invalid response from parent VM")]
424    VmBadResponse,
425    #[error("failed to control parent VM: {0}")]
426    VmControl(TubeError),
427    #[error("access violating in guest volatile memory: {0}")]
428    VolatileMemory(#[from] VolatileMemoryError),
429    #[error("failed to listen to descriptor on wait context: {0}")]
430    WaitContextAdd(Error),
431    #[error("failed to write to a pipe: {0}")]
432    WritePipe(io::Error),
433    #[error("failed to write response: {0}")]
434    WriteResponse(io::Error),
435}
436
// Result alias used throughout this module.
type WlResult<T> = result::Result<T, WlError>;

/// Id of the shared memory region this device exposes.
pub const WL_SHMEM_ID: u8 = 0;
/// Size of the shmem window in which vfd allocations are mapped (4 GiB).
pub const WL_SHMEM_SIZE: u64 = 1 << 32;
441
// Mutable state behind `VmRequester`: the shmem mapper, the optional gralloc instance, and the
// bookkeeping for offsets handed out inside the device's shmem window.
struct VmRequesterState {
    mapper: Box<dyn SharedMemoryMapper>,
    #[cfg(feature = "gbm")]
    gralloc: RutabagaGralloc,

    // Allocator for shm address space
    address_allocator: AddressAllocator,

    // Map of existing mappings in the shm address space
    allocs: BTreeMap<u64 /* offset */, Alloc>,

    // The id for the next shmem allocation
    next_alloc: usize,
}
456
// Cheaply-clonable handle (Rc<RefCell<..>>) used to register and unregister guest-visible
// memory mappings; shared between vfds so each can release its own slot.
#[derive(Clone)]
struct VmRequester {
    state: Rc<RefCell<VmRequesterState>>,
}
461
// The following are wrappers to avoid base dependencies in the rutabaga crate
#[cfg(feature = "gbm")]
fn to_safe_descriptor(r: RutabagaDescriptor) -> SafeDescriptor {
    // SAFETY:
    // Safe because we own the RutabagaDescriptor, so transferring its raw descriptor into a new
    // SafeDescriptor keeps exactly one owner.
    unsafe { base::FromRawDescriptor::from_raw_descriptor(r.into_raw_descriptor()) }
}
469
impl VmRequester {
    /// Creates a `VmRequester` mapping memory through `mapper`, with a page-granular offset
    /// allocator spanning the device's `[0, WL_SHMEM_SIZE)` shmem window.
    fn new(
        mapper: Box<dyn SharedMemoryMapper>,
        #[cfg(feature = "gbm")] gralloc: RutabagaGralloc,
    ) -> VmRequester {
        VmRequester {
            state: Rc::new(RefCell::new(VmRequesterState {
                mapper,
                #[cfg(feature = "gbm")]
                gralloc,
                address_allocator: AddressAllocator::new(
                    AddressRange::from_start_and_size(0, WL_SHMEM_SIZE).unwrap(),
                    Some(pagesize() as u64),
                    None,
                )
                .expect("failed to create allocator"),
                allocs: BTreeMap::new(),
                next_alloc: 0,
            })),
        }
    }

    /// Removes the mapping at `offset` and releases its reservation in the offset allocator.
    fn unregister_memory(&self, offset: u64) -> WlResult<()> {
        let mut state = self.state.borrow_mut();
        state
            .mapper
            .remove_mapping(offset)
            .map_err(WlError::ShmemMapperError)?;
        let alloc = state
            .allocs
            .remove(&offset)
            .context("unknown offset")
            .map_err(WlError::ShmemMapperError)?;
        state
            .address_allocator
            .release(alloc)
            .expect("corrupt address space");
        Ok(())
    }

    /// Allocates a linear GPU buffer via gralloc and maps it read/write into the shmem window.
    ///
    /// Returns the shmem offset of the mapping, a descriptor for the buffer, and the memory
    /// requirements (size, per-plane strides/offsets, map_info) reported by gralloc.
    #[cfg(feature = "gbm")]
    fn allocate_and_register_gpu_memory(
        &self,
        width: u32,
        height: u32,
        format: u32,
    ) -> WlResult<(u64, SafeDescriptor, ImageMemoryRequirements)> {
        let mut state = self.state.borrow_mut();

        let img = ImageAllocationInfo {
            width,
            height,
            drm_format: DrmFormat::from(format),
            // Linear layout is a requirement as virtio wayland guest expects
            // this for CPU access to the buffer. Scanout and texturing are
            // optional as the consumer (wayland compositor) is expected to
            // fall-back to a less efficient mechanisms for presentation if
            // necessary. In practice, linear buffers for commonly used formats
            // will also support scanout and texturing.
            flags: RutabagaGrallocFlags::empty().use_linear(true),
        };

        let reqs = state
            .gralloc
            .get_image_memory_requirements(img)
            .map_err(WlError::GrallocError)?;
        let handle = state
            .gralloc
            .allocate_memory(reqs)
            .map_err(WlError::GrallocError)?;
        // Release the RefCell borrow before register_memory re-borrows the state.
        drop(state);

        let safe_descriptor = to_safe_descriptor(handle.os_handle);
        self.register_memory(
            safe_descriptor
                .try_clone()
                .context("failed to dup gfx handle")
                .map_err(WlError::ShmemMapperError)?,
            reqs.size,
            Protection::read_write(),
        )
        .map(|info| (info, safe_descriptor, reqs))
    }

    /// Maps `shm` into the shmem window, deriving the protection from the descriptor's file
    /// flags; a read/write descriptor with a write seal is mapped read-only.
    fn register_shmem(&self, shm: &SharedMemory) -> WlResult<u64> {
        let prot = match FileFlags::from_file(shm) {
            Ok(FileFlags::Read) => Protection::read(),
            Ok(FileFlags::Write) => Protection::write(),
            Ok(FileFlags::ReadWrite) => {
                let seals = shm.get_seals().map_err(WlError::GetSeals)?;
                if seals.write_seal() {
                    Protection::read()
                } else {
                    Protection::read_write()
                }
            }
            Err(e) => {
                return Err(WlError::ShmemMapperError(anyhow!(
                    "failed to get file descriptor flags with error: {:?}",
                    e
                )))
            }
        };
        self.register_memory(
            SafeDescriptor::try_from(shm as &dyn AsRawDescriptor)
                .context("failed to create safe descriptor")
                .map_err(WlError::ShmemMapperError)?,
            shm.size(),
            prot,
        )
    }

    /// Maps `descriptor` (size rounded up to a page multiple) into the shmem window with `prot`
    /// and returns the chosen offset; rolls back the offset reservation if mapping fails.
    fn register_memory(
        &self,
        descriptor: SafeDescriptor,
        size: u64,
        prot: Protection,
    ) -> WlResult<u64> {
        let mut state = self.state.borrow_mut();
        let size = round_up_to_page_size(size as usize) as u64;

        let source = VmMemorySource::Descriptor {
            descriptor,
            offset: 0,
            size,
        };
        let alloc = Alloc::Anon(state.next_alloc);
        state.next_alloc += 1;
        let offset = state
            .address_allocator
            .allocate(size, alloc, "virtio-wl".to_owned())
            .context("failed to allocate offset")
            .map_err(WlError::ShmemMapperError)?;

        match state
            .mapper
            .add_mapping(source, offset, prot, MemCacheType::CacheCoherent)
        {
            Ok(()) => {
                state.allocs.insert(offset, alloc);
                Ok(offset)
            }
            Err(e) => {
                // We just allocated it ourselves, it must exist.
                state
                    .address_allocator
                    .release(alloc)
                    .expect("corrupt address space");
                Err(WlError::ShmemMapperError(e))
            }
        }
    }
}
623
// Common header of every virtio-wl wire message: a command/response code plus flags.
#[repr(C)]
#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct CtrlHeader {
    type_: Le32,
    flags: Le32,
}
630
// Wire layout of VIRTIO_WL_CMD_VFD_NEW / VIRTIO_WL_RESP_VFD_NEW (see `encode_vfd_new`).
#[repr(C)]
#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct CtrlVfdNew {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
    pfn: Le64,
    size: Le32,
    padding: Le32,
}
641
// Wire layout of VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED; `name` selects which host socket to connect
// to. Total size is pinned to 64 bytes by the assertion below.
#[repr(C)]
#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct CtrlVfdNewCtxNamed {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32, // Ignored.
    pfn: Le64,   // Ignored.
    size: Le32,  // Ignored.
    name: [u8; 32],
    _pad: u32,
}
const_assert_eq!(size_of::<CtrlVfdNewCtxNamed>(), 64);
654
// Wire layout of VIRTIO_WL_RESP_VFD_NEW_DMABUF (see `encode_vfd_new_dmabuf`), reporting the
// stride/offset of up to three image planes.
// NOTE(review): moved `#[cfg]` above the other attributes for consistency with every other
// gbm-gated item in this file (e.g. `CtrlVfdDmabufSync`); attribute order has no semantic
// effect here.
#[cfg(feature = "gbm")]
#[repr(C)]
#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct CtrlVfdNewDmabuf {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
    pfn: Le64,
    size: Le32,
    width: Le32,
    height: Le32,
    format: Le32,
    stride0: Le32,
    stride1: Le32,
    stride2: Le32,
    offset0: Le32,
    offset1: Le32,
    offset2: Le32,
}
674
// Wire layout of VIRTIO_WL_CMD_VFD_DMABUF_SYNC; `flags` carries DMA_BUF_SYNC_* bits.
#[cfg(feature = "gbm")]
#[repr(C)]
#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct CtrlVfdDmabufSync {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
}
683
// Wire layout of the VIRTIO_WL_CMD_VFD_RECV header; followed on the wire by `vfd_count`
// little-endian ids and then the data payload (see `encode_vfd_recv`).
#[repr(C)]
#[derive(Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct CtrlVfdRecv {
    hdr: CtrlHeader,
    id: Le32,
    vfd_count: Le32,
}
691
// Minimal single-id message layout, used e.g. for VIRTIO_WL_CMD_VFD_HUP (see `encode_vfd_hup`).
#[repr(C)]
#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct CtrlVfd {
    hdr: CtrlHeader,
    id: Le32,
}
698
// Header of VIRTIO_WL_CMD_VFD_SEND.
#[repr(C)]
#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct CtrlVfdSend {
    hdr: CtrlHeader,
    id: Le32,
    vfd_count: Le32,
    // Remainder is an array of vfd_count IDs followed by data.
}
707
// One attached vfd entry of a send message: its kind (VIRTIO_WL_CTRL_VFD_SEND_KIND_*) and id.
#[repr(C)]
#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
struct CtrlVfdSendVfd {
    kind: Le32,
    id: Le32,
}
714
// Payload of a V2 send entry; which member is valid depends on the entry's `kind`
// (see `CtrlVfdSendVfdV2::id` / `seqno`).
#[repr(C)]
#[derive(Copy, Clone, FromBytes, Immutable, KnownLayout)]
union CtrlVfdSendVfdV2Payload {
    id: Le32,
    seqno: Le64,
}
721
// V2 variant of a send-message vfd entry: a kind tag plus a kind-dependent payload union.
#[repr(C)]
#[derive(Copy, Clone, FromBytes, Immutable, KnownLayout)]
struct CtrlVfdSendVfdV2 {
    kind: Le32,
    payload: CtrlVfdSendVfdV2Payload,
}
728
impl CtrlVfdSendVfdV2 {
    /// Returns the vfd id; panics unless `kind` is LOCAL or VIRTGPU.
    fn id(&self) -> Le32 {
        assert!(
            self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL
                || self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
        );
        // SAFETY: trivially safe given we assert kind
        unsafe { self.payload.id }
    }
    /// Returns the fence sequence number; panics unless `kind` is VIRTGPU_FENCE.
    #[cfg(feature = "gpu")]
    fn seqno(&self) -> Le64 {
        assert!(self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE);
        // SAFETY: trivially safe given we assert kind
        unsafe { self.payload.seqno }
    }
}
745
// A decoded message to be written back to the guest via `encode_resp`; covers both responses
// to guest commands and host-initiated commands (VfdRecv, VfdHup, and VfdNew with resp=false).
#[derive(Debug)]
#[allow(dead_code)]
enum WlResp<'a> {
    Ok,
    VfdNew {
        id: u32,
        flags: u32,
        pfn: u64,
        size: u32,
        // The VfdNew variant can be either a response or a command depending on this `resp`. This
        // is important for the `get_code` method.
        resp: bool,
    },
    #[cfg(feature = "gbm")]
    VfdNewDmabuf {
        id: u32,
        flags: u32,
        pfn: u64,
        size: u32,
        desc: GpuMemoryDesc,
    },
    VfdRecv {
        id: u32,
        data: &'a [u8],
        vfds: &'a [u32],
    },
    VfdHup {
        id: u32,
    },
    Err(Box<dyn StdError>),
    OutOfMemory,
    InvalidId,
    InvalidType,
    InvalidFlags,
    InvalidCommand,
}
782
impl WlResp<'_> {
    /// Returns the 32-bit wire code for this message: a VIRTIO_WL_RESP_* code for responses, or
    /// a VIRTIO_WL_CMD_* code for host-initiated commands (VfdRecv, VfdHup, VfdNew{resp:false}).
    fn get_code(&self) -> u32 {
        match *self {
            WlResp::Ok => VIRTIO_WL_RESP_OK,
            WlResp::VfdNew { resp, .. } => {
                if resp {
                    VIRTIO_WL_RESP_VFD_NEW
                } else {
                    VIRTIO_WL_CMD_VFD_NEW
                }
            }
            #[cfg(feature = "gbm")]
            WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
            WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
            WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
            WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
            WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
            WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
            WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
            WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
            WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
        }
    }
}
807
// Host-side state backing one virtual file descriptor. Which fields are populated depends on
// how the vfd was created: a wayland socket connection, a shared memory allocation (optionally
// a dmabuf), a pipe pair, or a sync fence (see the constructors in `impl WlVfd`).
#[derive(Default)]
struct WlVfd {
    socket: Option<ScmSocket<UnixStream>>,
    guest_shared_memory: Option<SharedMemory>,
    // The end of a pipe pair held for the other side of the connection; the opposite end is in
    // `local_pipe` (see `pipe_remote_read_local_write` / `pipe_remote_write_local_read`).
    remote_pipe: Option<File>,
    local_pipe: Option<(u32 /* flags */, File)>,
    // Offset of this vfd's mapping in the shmem window, plus the requester to unmap it with.
    slot: Option<(u64 /* offset */, VmRequester)>,
    #[cfg(feature = "gbm")]
    is_dmabuf: bool,
    // Gralloc map_info (cache type bits) of the dmabuf allocation.
    #[cfg(feature = "gbm")]
    map_info: u32,
    fence: Option<File>,
    is_fence: bool,
}
822
// Compact single-line debug form showing only the populated descriptor fields.
impl fmt::Debug for WlVfd {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "WlVfd {{")?;
        if let Some(s) = &self.socket {
            write!(f, " socket: {}", s.as_raw_descriptor())?;
        }
        if let Some((offset, _)) = &self.slot {
            write!(f, " offset: {offset}")?;
        }
        if let Some(s) = &self.remote_pipe {
            write!(f, " remote: {}", s.as_raw_descriptor())?;
        }
        if let Some((_, s)) = &self.local_pipe {
            write!(f, " local: {}", s.as_raw_descriptor())?;
        }
        write!(f, " }}")
    }
}
841
/// Maps `shared_memory` and flushes all of its cachelines to memory.
///
/// Returns ENOSYS when the platform has no flush implementation and EINVAL for any other
/// mapping or flush failure.
#[cfg(feature = "gbm")]
fn flush_shared_memory(shared_memory: &SharedMemory) -> Result<()> {
    let mmap = MemoryMappingBuilder::new(shared_memory.size as usize)
        .from_shared_memory(shared_memory)
        .build()
        .map_err(|_| Error::new(EINVAL))?;
    match mmap.flush_all() {
        Ok(()) => Ok(()),
        Err(err) => {
            base::error!("failed to flush shared memory: {}", err);
            match err {
                MmapError::NotImplemented(_) => Err(Error::new(ENOSYS)),
                _ => Err(Error::new(EINVAL)),
            }
        }
    }
}
860
861impl WlVfd {
862    fn connect<P: AsRef<Path>>(path: P) -> WlResult<WlVfd> {
863        let socket = UnixStream::connect(path).map_err(WlError::SocketConnect)?;
864        let mut vfd = WlVfd::default();
865        vfd.socket = Some(socket.try_into().map_err(WlError::SocketConnect)?);
866        Ok(vfd)
867    }
868
869    fn allocate(vm: VmRequester, size: u64) -> WlResult<WlVfd> {
870        let size_page_aligned = round_up_to_page_size(size as usize) as u64;
871        let vfd_shm =
872            SharedMemory::new("virtwl_alloc", size_page_aligned).map_err(WlError::NewAlloc)?;
873
874        let offset = vm.register_shmem(&vfd_shm)?;
875
876        let mut vfd = WlVfd::default();
877        vfd.guest_shared_memory = Some(vfd_shm);
878        vfd.slot = Some((offset, vm));
879        Ok(vfd)
880    }
881
882    #[cfg(feature = "gbm")]
883    fn dmabuf(
884        vm: VmRequester,
885        width: u32,
886        height: u32,
887        format: u32,
888    ) -> WlResult<(WlVfd, GpuMemoryDesc)> {
889        let (offset, desc, reqs) = vm.allocate_and_register_gpu_memory(width, height, format)?;
890        let mut vfd = WlVfd::default();
891        let vfd_shm =
892            SharedMemory::from_safe_descriptor(desc, reqs.size).map_err(WlError::NewAlloc)?;
893
894        let mut desc = GpuMemoryDesc::default();
895        for i in 0..3 {
896            desc.planes[i] = GpuMemoryPlaneDesc {
897                stride: reqs.strides[i],
898                offset: reqs.offsets[i],
899            }
900        }
901
902        vfd.guest_shared_memory = Some(vfd_shm);
903        vfd.slot = Some((offset, vm));
904        vfd.is_dmabuf = true;
905        vfd.map_info = reqs.map_info;
906        Ok((vfd, desc))
907    }
908
    /// Performs a DMA_BUF_IOCTL_SYNC on this vfd's dmabuf with the given DMA_BUF_SYNC_* `flags`,
    /// flushing CPU caches afterwards when the host mapping is not cached.
    ///
    /// Fails with an EINVAL-flavored `DmabufSync` error when the vfd is not a dmabuf, and with
    /// EBADF when it has no backing shared memory.
    #[cfg(feature = "gbm")]
    fn dmabuf_sync(&self, flags: u32) -> WlResult<()> {
        if !self.is_dmabuf {
            return Err(WlError::DmabufSync(io::Error::from_raw_os_error(EINVAL)));
        }

        match &self.guest_shared_memory {
            Some(descriptor) => {
                let sync = dma_buf_sync {
                    flags: flags as u64,
                };
                // SAFETY:
                // Safe as descriptor is a valid dmabuf and incorrect flags will return an error.
                if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC, &sync) } < 0 {
                    return Err(WlError::DmabufSync(io::Error::last_os_error()));
                }

                // virtio-wl kernel driver always maps dmabufs with WB memory type, regardless of
                // the host memory type (which is wrong). However, to avoid changing the protocol,
                // assume that all guest writes are cached and ensure clflush-like ops on all mapped
                // cachelines if the host mapping is not cached.
                const END_WRITE_MASK: u32 = DMA_BUF_SYNC_WRITE | DMA_BUF_SYNC_END;
                if (flags & END_WRITE_MASK) == END_WRITE_MASK
                    && (self.map_info & RUTABAGA_MAP_CACHE_MASK) != RUTABAGA_MAP_CACHE_CACHED
                {
                    if let Err(err) = flush_shared_memory(descriptor) {
                        base::warn!("failed to flush cached dmabuf mapping: {:?}", err);
                        return Err(WlError::DmabufSync(io::Error::from_raw_os_error(
                            err.errno(),
                        )));
                    }
                }
                Ok(())
            }
            None => Err(WlError::DmabufSync(io::Error::from_raw_os_error(EBADF))),
        }
    }
946
947    fn pipe_remote_read_local_write() -> WlResult<WlVfd> {
948        let (read_pipe, write_pipe) = pipe().map_err(WlError::NewPipe)?;
949        let mut vfd = WlVfd::default();
950        vfd.remote_pipe = Some(read_pipe);
951        vfd.local_pipe = Some((VIRTIO_WL_VFD_WRITE, write_pipe));
952        Ok(vfd)
953    }
954
955    fn pipe_remote_write_local_read() -> WlResult<WlVfd> {
956        let (read_pipe, write_pipe) = pipe().map_err(WlError::NewPipe)?;
957        let mut vfd = WlVfd::default();
958        vfd.remote_pipe = Some(write_pipe);
959        vfd.local_pipe = Some((VIRTIO_WL_VFD_READ, read_pipe));
960        Ok(vfd)
961    }
962
963    fn from_file(vm: VmRequester, mut descriptor: File) -> WlResult<WlVfd> {
964        // We need to determine if the given file is more like shared memory or a pipe/socket. A
965        // quick and easy check is to seek to the end of the file. If it works we assume it's not a
966        // pipe/socket because those have no end. We can even use that seek location as an indicator
967        // for how big the shared memory chunk to map into guest memory is. If seeking to the end
968        // fails, we assume it's a socket or pipe with read/write semantics.
969        if descriptor.seek(SeekFrom::End(0)).is_ok() {
970            let shm = SharedMemory::from_file(descriptor).map_err(WlError::FromSharedMemory)?;
971            let offset = vm.register_shmem(&shm)?;
972
973            let mut vfd = WlVfd::default();
974            vfd.guest_shared_memory = Some(shm);
975            vfd.slot = Some((offset, vm));
976            Ok(vfd)
977        } else if is_fence(&descriptor) {
978            let mut vfd = WlVfd::default();
979            vfd.is_fence = true;
980            vfd.fence = Some(descriptor);
981            Ok(vfd)
982        } else {
983            let flags = match FileFlags::from_file(&descriptor) {
984                Ok(FileFlags::Read) => VIRTIO_WL_VFD_READ,
985                Ok(FileFlags::Write) => VIRTIO_WL_VFD_WRITE,
986                Ok(FileFlags::ReadWrite) => VIRTIO_WL_VFD_READ | VIRTIO_WL_VFD_WRITE,
987                _ => 0,
988            };
989            let mut vfd = WlVfd::default();
990            vfd.local_pipe = Some((flags, descriptor));
991            Ok(vfd)
992        }
993    }
994
995    fn flags(&self, use_transition_flags: bool) -> u32 {
996        let mut flags = 0;
997        if use_transition_flags {
998            if self.socket.is_some() {
999                flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
1000            }
1001            if let Some((f, _)) = self.local_pipe {
1002                flags |= f;
1003            }
1004            if self.is_fence {
1005                flags |= VIRTIO_WL_VFD_FENCE;
1006            }
1007        } else {
1008            if self.socket.is_some() {
1009                flags |= VIRTIO_WL_VFD_CONTROL;
1010            }
1011            if self.slot.is_some() {
1012                flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP
1013            }
1014        }
1015        flags
1016    }
1017
1018    // Offset within the shared memory region this VFD was mapped at.
1019    fn offset(&self) -> Option<u64> {
1020        self.slot.as_ref().map(|s| s.0)
1021    }
1022
1023    // Size in bytes of the shared memory VFD.
1024    fn size(&self) -> Option<u64> {
1025        self.guest_shared_memory.as_ref().map(|shm| shm.size())
1026    }
1027
1028    // The descriptor that gets sent if this VFD is sent over a socket.
1029    fn send_descriptor(&self) -> Option<RawDescriptor> {
1030        self.guest_shared_memory
1031            .as_ref()
1032            .map(|shm| shm.as_raw_descriptor())
1033            .or(self.socket.as_ref().map(|s| s.as_raw_descriptor()))
1034            .or(self.remote_pipe.as_ref().map(|p| p.as_raw_descriptor()))
1035            .or(self.fence.as_ref().map(|f| f.as_raw_descriptor()))
1036    }
1037
1038    // The FD that is used for polling for events on this VFD.
1039    fn wait_descriptor(&self) -> Option<&dyn AsRawDescriptor> {
1040        self.socket
1041            .as_ref()
1042            .map(|s| s as &dyn AsRawDescriptor)
1043            .or_else(|| {
1044                self.local_pipe
1045                    .as_ref()
1046                    .map(|(_, p)| p as &dyn AsRawDescriptor)
1047            })
1048            .or_else(|| self.fence.as_ref().map(|f| f as &dyn AsRawDescriptor))
1049    }
1050
    // Sends data/files from the guest to the host over this VFD.
    //
    // For a socket VFD, all remaining bytes in `data` plus the descriptors in
    // `rds` are sent as one message. For a local pipe, descriptors cannot be
    // transferred, so a non-empty `rds` yields `WlResp::InvalidType`. VFDs
    // with neither a socket nor a local pipe are not sendable.
    fn send(&mut self, rds: &[RawDescriptor], data: &mut Reader) -> WlResult<WlResp> {
        if let Some(socket) = &self.socket {
            socket
                .send_vectored_with_fds(&data.get_remaining(), rds)
                .map_err(WlError::SendVfd)?;
            // All remaining data in `data` is now considered consumed.
            data.consume(usize::MAX);
            Ok(WlResp::Ok)
        } else if let Some((_, local_pipe)) = &mut self.local_pipe {
            // Impossible to send descriptors over a simple pipe.
            if !rds.is_empty() {
                return Ok(WlResp::InvalidType);
            }
            data.read_to(local_pipe, usize::MAX)
                .map_err(WlError::WritePipe)?;
            Ok(WlResp::Ok)
        } else {
            Ok(WlResp::InvalidType)
        }
    }
1072
    // Receives data/files from the host for this VFD and queues it for the guest.
    //
    // Returns the bytes read (empty on EOF/hangup) and appends any received
    // file descriptors to `in_file_queue`. The socket/pipe is `take()`n out of
    // `self` up front so that an error or EOF leaves it dropped, which stops
    // this VFD from being polled again; it is only put back after a
    // successful, non-empty read.
    fn recv(&mut self, in_file_queue: &mut Vec<File>) -> WlResult<Vec<u8>> {
        if let Some(socket) = self.socket.take() {
            let mut buf = vec![0; IN_BUFFER_LEN];
            // If any errors happen, the socket will get dropped, preventing more reading.
            let (len, descriptors) = socket
                .recv_with_fds(&mut buf, VIRTWL_SEND_MAX_ALLOCS)
                .map_err(WlError::RecvVfd)?;
            // If any data gets read, the put the socket back for future recv operations.
            if len != 0 || !descriptors.is_empty() {
                buf.truncate(len);
                buf.shrink_to_fit();
                self.socket = Some(socket);
                in_file_queue.extend(descriptors.into_iter().map(File::from));
                return Ok(buf);
            }
            Ok(Vec::new())
        } else if let Some((flags, mut local_pipe)) = self.local_pipe.take() {
            let mut buf = vec![0; IN_BUFFER_LEN];
            let len = local_pipe.read(&mut buf[..]).map_err(WlError::ReadPipe)?;
            if len != 0 {
                buf.truncate(len);
                buf.shrink_to_fit();
                self.local_pipe = Some((flags, local_pipe));
                return Ok(buf);
            }
            Ok(Vec::new())
        } else {
            // Nothing to receive from; report an empty read.
            Ok(Vec::new())
        }
    }
1104
1105    // Called after this VFD is sent over a socket to ensure the local end of the VFD receives hang
1106    // up events.
1107    fn close_remote(&mut self) {
1108        self.remote_pipe = None;
1109    }
1110
1111    fn close(&mut self) -> WlResult<()> {
1112        if let Some((offset, vm)) = self.slot.take() {
1113            vm.unregister_memory(offset)?;
1114        }
1115        self.socket = None;
1116        self.remote_pipe = None;
1117        self.local_pipe = None;
1118        Ok(())
1119    }
1120}
1121
impl Drop for WlVfd {
    fn drop(&mut self) {
        // Best-effort cleanup: close() can fail (e.g. when unregistering
        // guest memory), but there is no way to surface an error from drop.
        let _ = self.close();
    }
}
1127
/// One item buffered for delivery to the guest on the `in` queue.
#[derive(Debug)]
enum WlRecv {
    /// Announces a newly created host-assigned VFD to the guest.
    Vfd { id: u32 },
    /// Bytes received on a VFD's socket or pipe.
    Data { buf: Vec<u8> },
    /// The host end of the VFD hung up.
    Hup,
}
1134
/// Tracks every known VFD and the buffered state of the guest-bound `in` queue.
pub struct WlState {
    // Socket name -> wayland server socket path, consulted by `new_context`.
    wayland_paths: BTreeMap<String, PathBuf>,
    // Interface for registering/unregistering shared memory with the VM.
    vm: VmRequester,
    // Tube to the gpu device for resolving foreign resource ids; set to `None`
    // permanently if a bridge transport error occurs.
    resource_bridge: Option<Tube>,
    // Protocol variant: report read/write/fence flag bits instead of the
    // legacy control/map bits (see `WlVfd::flags`).
    use_transition_flags: bool,
    // Poll context over every VFD's wait descriptor, keyed by vfd id.
    wait_ctx: WaitContext<u32>,
    // All live VFDs keyed by id.
    vfds: BTreeMap<u32, WlVfd>,
    // Next id to assign to a host-created VFD.
    next_vfd_id: u32,
    // Files received over sockets, pending conversion into new VFDs.
    in_file_queue: Vec<File>,
    // Messages buffered for the guest while the `in` queue has no space.
    in_queue: VecDeque<(u32 /* vfd_id */, WlRecv)>,
    // The vfd whose recv is currently being assembled, plus the new vfd ids
    // announced for it so far (see `next_recv`/`pop_recv`).
    current_recv_vfd: Option<u32>,
    recv_vfds: Vec<u32>,
    // Cached descriptor for the signaled-fence fast path in `send`.
    #[cfg(feature = "gpu")]
    signaled_fence: Option<SafeDescriptor>,
    // Protocol variant: parse send-vfd ids as `CtrlVfdSendVfdV2`.
    use_send_vfd_v2: bool,
    // Extra offset used in guest pfn computation — consumed by `compute_pfn`
    // (defined outside this view); assumed, confirm there.
    address_offset: Option<u64>,
}
1152
1153impl WlState {
    /// Create a new `WlState` instance for running a virtio-wl device.
    ///
    /// `wayland_paths` maps socket names to wayland server socket paths used
    /// by the NEW_CTX commands. `mapper` is handed to the `VmRequester` for
    /// registering shared memory. `use_transition_flags` and `use_send_vfd_v2`
    /// select protocol variants. `address_offset` is stored for guest address
    /// computation.
    ///
    /// Panics if a `WaitContext` cannot be created.
    pub fn new(
        wayland_paths: BTreeMap<String, PathBuf>,
        mapper: Box<dyn SharedMemoryMapper>,
        use_transition_flags: bool,
        use_send_vfd_v2: bool,
        resource_bridge: Option<Tube>,
        #[cfg(feature = "gbm")] gralloc: RutabagaGralloc,
        address_offset: Option<u64>,
    ) -> WlState {
        WlState {
            wayland_paths,
            vm: VmRequester::new(
                mapper,
                #[cfg(feature = "gbm")]
                gralloc,
            ),
            resource_bridge,
            wait_ctx: WaitContext::new().expect("failed to create WaitContext"),
            use_transition_flags,
            vfds: BTreeMap::new(),
            next_vfd_id: NEXT_VFD_ID_BASE,
            in_file_queue: Vec::new(),
            in_queue: VecDeque::new(),
            current_recv_vfd: None,
            recv_vfds: Vec::new(),
            #[cfg(feature = "gpu")]
            signaled_fence: None,
            use_send_vfd_v2,
            address_offset,
        }
    }
1186
    /// Returns the internal `WaitContext` used to poll VFD activity.
    ///
    /// This is a hack so that we can drive the inner WaitContext from an async fn. The proper
    /// long-term solution is to replace the WaitContext completely by spawning async workers
    /// instead.
    pub fn wait_ctx(&self) -> &WaitContext<u32> {
        &self.wait_ctx
    }
1193
1194    fn new_pipe(&mut self, id: u32, flags: u32) -> WlResult<WlResp> {
1195        if id & VFD_ID_HOST_MASK != 0 {
1196            return Ok(WlResp::InvalidId);
1197        }
1198
1199        if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ) != 0 {
1200            return Ok(WlResp::InvalidFlags);
1201        }
1202
1203        if flags & VIRTIO_WL_VFD_WRITE != 0 && flags & VIRTIO_WL_VFD_READ != 0 {
1204            return Ok(WlResp::InvalidFlags);
1205        }
1206
1207        match self.vfds.entry(id) {
1208            Entry::Vacant(entry) => {
1209                let vfd = if flags & VIRTIO_WL_VFD_WRITE != 0 {
1210                    WlVfd::pipe_remote_read_local_write()?
1211                } else if flags & VIRTIO_WL_VFD_READ != 0 {
1212                    WlVfd::pipe_remote_write_local_read()?
1213                } else {
1214                    return Ok(WlResp::InvalidFlags);
1215                };
1216                self.wait_ctx
1217                    .add(vfd.wait_descriptor().unwrap(), id)
1218                    .map_err(WlError::WaitContextAdd)?;
1219                let resp = WlResp::VfdNew {
1220                    id,
1221                    flags: 0,
1222                    pfn: 0,
1223                    size: 0,
1224                    resp: true,
1225                };
1226                entry.insert(vfd);
1227                Ok(resp)
1228            }
1229            Entry::Occupied(_) => Ok(WlResp::InvalidId),
1230        }
1231    }
1232
    /// Handles a guest request to allocate `size` bytes of shared memory as a
    /// new VFD with the given guest-range `id`.
    fn new_alloc(&mut self, id: u32, flags: u32, size: u32) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }

        if self.use_transition_flags {
            // With transition flags, allocations must carry no flags at all.
            if flags != 0 {
                return Ok(WlResp::InvalidFlags);
            }
        } else if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP) != 0 {
            // NOTE(review): this branch answers with a string error while the
            // branch above uses WlResp::InvalidFlags — confirm the asymmetry
            // is intentional before unifying.
            return Ok(WlResp::Err(Box::from("invalid flags")));
        }

        if self.vfds.contains_key(&id) {
            return Ok(WlResp::InvalidId);
        }
        let vfd = WlVfd::allocate(self.vm.clone(), size as u64)?;
        let resp = WlResp::VfdNew {
            id,
            flags,
            pfn: self.compute_pfn(&vfd.offset()),
            size: vfd.size().unwrap_or_default() as u32,
            resp: true,
        };
        self.vfds.insert(id, vfd);
        Ok(resp)
    }
1260
    /// Handles a guest request to allocate a dmabuf-backed VFD with the given
    /// dimensions and fourcc `format`, answering with the buffer's layout.
    #[cfg(feature = "gbm")]
    fn new_dmabuf(&mut self, id: u32, width: u32, height: u32, format: u32) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }

        if self.vfds.contains_key(&id) {
            return Ok(WlResp::InvalidId);
        }
        let (vfd, desc) = WlVfd::dmabuf(self.vm.clone(), width, height, format)?;
        let resp = WlResp::VfdNewDmabuf {
            id,
            flags: 0,
            pfn: self.compute_pfn(&vfd.offset()),
            size: vfd.size().unwrap_or_default() as u32,
            desc,
        };
        self.vfds.insert(id, vfd);
        Ok(resp)
    }
1281
1282    #[cfg(feature = "gbm")]
1283    fn dmabuf_sync(&mut self, vfd_id: u32, flags: u32) -> WlResult<WlResp> {
1284        if flags & !(VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK) != 0 {
1285            return Ok(WlResp::InvalidFlags);
1286        }
1287
1288        match self.vfds.get_mut(&vfd_id) {
1289            Some(vfd) => {
1290                vfd.dmabuf_sync(flags)?;
1291                Ok(WlResp::Ok)
1292            }
1293            None => Ok(WlResp::InvalidId),
1294        }
1295    }
1296
1297    fn new_context(&mut self, id: u32, name: &str) -> WlResult<WlResp> {
1298        if id & VFD_ID_HOST_MASK != 0 {
1299            return Ok(WlResp::InvalidId);
1300        }
1301
1302        let flags = if self.use_transition_flags {
1303            VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ
1304        } else {
1305            VIRTIO_WL_VFD_CONTROL
1306        };
1307
1308        match self.vfds.entry(id) {
1309            Entry::Vacant(entry) => {
1310                let vfd = entry.insert(WlVfd::connect(
1311                    self.wayland_paths
1312                        .get(name)
1313                        .ok_or_else(|| WlError::UnknownSocketName(name.to_string()))?,
1314                )?);
1315                self.wait_ctx
1316                    .add(vfd.wait_descriptor().unwrap(), id)
1317                    .map_err(WlError::WaitContextAdd)?;
1318                Ok(WlResp::VfdNew {
1319                    id,
1320                    flags,
1321                    pfn: 0,
1322                    size: 0,
1323                    resp: true,
1324                })
1325            }
1326            Entry::Occupied(_) => Ok(WlResp::InvalidId),
1327        }
1328    }
1329
1330    fn process_wait_context(&mut self) {
1331        let events = match self.wait_ctx.wait_timeout(Duration::from_secs(0)) {
1332            Ok(v) => v,
1333            Err(e) => {
1334                error!("failed waiting for vfd evens: {}", e);
1335                return;
1336            }
1337        };
1338
1339        for event in events.iter().filter(|e| e.is_readable) {
1340            if let Err(e) = self.recv(event.token) {
1341                error!("failed to recv from vfd: {}", e)
1342            }
1343        }
1344
1345        for event in events.iter().filter(|e| e.is_hungup) {
1346            if !event.is_readable {
1347                let vfd_id = event.token;
1348                if let Some(descriptor) =
1349                    self.vfds.get(&vfd_id).and_then(|vfd| vfd.wait_descriptor())
1350                {
1351                    if let Err(e) = self.wait_ctx.delete(descriptor) {
1352                        warn!("failed to remove hungup vfd from poll context: {}", e);
1353                    }
1354                }
1355                self.in_queue.push_back((vfd_id, WlRecv::Hup));
1356            }
1357        }
1358    }
1359
    /// Closes the VFD with `vfd_id`, removing it and any state that refers to
    /// it.
    ///
    /// Any host-assigned VFDs that were still queued for announcement to the
    /// guest via this VFD (`WlRecv::Vfd` entries destined for `vfd_id`) are
    /// recursively closed first, since the guest can never learn about them
    /// once the parent is gone. Remaining in-queue entries for the VFD are
    /// then dropped.
    fn close(&mut self, vfd_id: u32) -> WlResult<WlResp> {
        let mut to_delete = BTreeSet::new();
        for (dest_vfd_id, q) in &self.in_queue {
            if *dest_vfd_id == vfd_id {
                if let WlRecv::Vfd { id } = q {
                    to_delete.insert(*id);
                }
            }
        }
        for vfd_id in to_delete {
            // Sorry sub-error, we can't have cascading errors leaving us in an inconsistent state.
            let _ = self.close(vfd_id);
        }
        match self.vfds.remove(&vfd_id) {
            Some(mut vfd) => {
                self.in_queue.retain(|&(id, _)| id != vfd_id);
                vfd.close()?;
                Ok(WlResp::Ok)
            }
            None => Ok(WlResp::InvalidId),
        }
    }
1382
    /// Resolves a gpu resource (buffer or fence) over the resource bridge,
    /// returning its descriptor.
    ///
    /// Returns `None` and logs on failure. A transport-level bridge error
    /// permanently disables the bridge, since it can no longer be trusted.
    ///
    /// Panics if `resource_bridge` is `None` — callers must check first.
    #[cfg(feature = "gpu")]
    fn get_info(&mut self, request: ResourceRequest) -> Option<SafeDescriptor> {
        let sock = self.resource_bridge.as_ref().unwrap();
        match get_resource_info(sock, request) {
            Ok(ResourceInfo::Buffer(BufferInfo { handle, .. })) => Some(handle),
            Ok(ResourceInfo::Fence { handle }) => Some(handle),
            Err(ResourceBridgeError::InvalidResource(req)) => {
                warn!("attempt to send non-existent gpu resource {}", req);
                None
            }
            Err(e) => {
                error!("{}", e);
                // If there was an error with the resource bridge, it can no longer be
                // trusted to continue to function.
                self.resource_bridge = None;
                None
            }
        }
    }
1402
1403    fn send(
1404        &mut self,
1405        vfd_id: u32,
1406        vfd_count: usize,
1407        foreign_id: bool,
1408        reader: &mut Reader,
1409    ) -> WlResult<WlResp> {
1410        // Validate vfd_count
1411        if vfd_count > VIRTWL_SEND_MAX_ALLOCS {
1412            warn!(
1413                "attempt to send more vfd's than VIRTWL_SEND_MAX_ALLOCS: {}",
1414                vfd_count
1415            );
1416            return Ok(WlResp::InvalidCommand);
1417        }
1418
1419        // First stage gathers and normalizes all id information from guest memory.
1420        let mut send_vfd_ids = [CtrlVfdSendVfdV2 {
1421            kind: Le32::from(0),
1422            payload: CtrlVfdSendVfdV2Payload { id: Le32::from(0) },
1423        }; VIRTWL_SEND_MAX_ALLOCS];
1424        for vfd_id in send_vfd_ids.iter_mut().take(vfd_count) {
1425            *vfd_id = if foreign_id {
1426                if self.use_send_vfd_v2 {
1427                    reader.read_obj().map_err(WlError::ParseDesc)?
1428                } else {
1429                    let vfd: CtrlVfdSendVfd = reader.read_obj().map_err(WlError::ParseDesc)?;
1430                    CtrlVfdSendVfdV2 {
1431                        kind: vfd.kind,
1432                        payload: CtrlVfdSendVfdV2Payload { id: vfd.id },
1433                    }
1434                }
1435            } else {
1436                CtrlVfdSendVfdV2 {
1437                    kind: Le32::from(VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL),
1438                    payload: CtrlVfdSendVfdV2Payload {
1439                        id: reader.read_obj().map_err(WlError::ParseDesc)?,
1440                    },
1441                }
1442            };
1443        }
1444
1445        // Next stage collects corresponding file descriptors for each id.
1446        let mut rds = [0; VIRTWL_SEND_MAX_ALLOCS];
1447        #[cfg(feature = "gpu")]
1448        let mut bridged_files = Vec::new();
1449        for (&send_vfd_id, descriptor) in send_vfd_ids[..vfd_count].iter().zip(rds.iter_mut()) {
1450            match send_vfd_id.kind.to_native() {
1451                VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL => {
1452                    match self.vfds.get(&send_vfd_id.id().to_native()) {
1453                        Some(vfd) => match vfd.send_descriptor() {
1454                            Some(vfd_fd) => *descriptor = vfd_fd,
1455                            None => return Ok(WlResp::InvalidType),
1456                        },
1457                        None => {
1458                            warn!(
1459                                "attempt to send non-existant vfd 0x{:08x}",
1460                                send_vfd_id.id().to_native()
1461                            );
1462                            return Ok(WlResp::InvalidId);
1463                        }
1464                    }
1465                }
1466                #[cfg(feature = "gpu")]
1467                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU if self.resource_bridge.is_some() => {
1468                    match self.get_info(ResourceRequest::GetBuffer {
1469                        id: send_vfd_id.id().to_native(),
1470                    }) {
1471                        Some(handle) => {
1472                            *descriptor = handle.as_raw_descriptor();
1473                            bridged_files.push(handle.into());
1474                        }
1475                        None => return Ok(WlResp::InvalidId),
1476                    }
1477                }
1478                #[cfg(feature = "gpu")]
1479                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE if self.resource_bridge.is_some() => {
1480                    match self.get_info(ResourceRequest::GetFence {
1481                        seqno: send_vfd_id.seqno().to_native(),
1482                    }) {
1483                        Some(handle) => {
1484                            *descriptor = handle.as_raw_descriptor();
1485                            bridged_files.push(handle.into());
1486                        }
1487                        None => return Ok(WlResp::InvalidId),
1488                    }
1489                }
1490                #[cfg(feature = "gpu")]
1491                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE
1492                    if self.resource_bridge.is_some() =>
1493                {
1494                    if self.signaled_fence.is_none() {
1495                        // If the guest is sending a signaled fence, we know a fence
1496                        // with seqno 0 must already be signaled.
1497                        match self.get_info(ResourceRequest::GetFence { seqno: 0 }) {
1498                            Some(handle) => self.signaled_fence = Some(handle),
1499                            None => return Ok(WlResp::InvalidId),
1500                        }
1501                    }
1502                    match self.signaled_fence.as_ref().unwrap().try_clone() {
1503                        Ok(dup) => {
1504                            *descriptor = dup.into_raw_descriptor();
1505                            // SAFETY:
1506                            // Safe because the fd comes from a valid SafeDescriptor.
1507                            let file: File = unsafe {
1508                                base::FromRawDescriptor::from_raw_descriptor(*descriptor)
1509                            };
1510                            bridged_files.push(file);
1511                        }
1512                        Err(_) => return Ok(WlResp::InvalidId),
1513                    }
1514                }
1515                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
1516                | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE
1517                | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE => {
1518                    let _ = self.resource_bridge.as_ref();
1519                    warn!("attempt to send foreign resource kind but feature is disabled");
1520                }
1521                kind => {
1522                    warn!("attempt to send unknown foreign resource kind: {}", kind);
1523                    return Ok(WlResp::InvalidId);
1524                }
1525            }
1526        }
1527
1528        // Final stage sends file descriptors and data to the target vfd's socket.
1529        match self.vfds.get_mut(&vfd_id) {
1530            Some(vfd) => match vfd.send(&rds[..vfd_count], reader)? {
1531                WlResp::Ok => {}
1532                _ => return Ok(WlResp::InvalidType),
1533            },
1534            None => return Ok(WlResp::InvalidId),
1535        }
1536        // The vfds with remote FDs need to be closed so that the local side can receive
1537        // hangup events.
1538        for &send_vfd_id in &send_vfd_ids[..vfd_count] {
1539            if send_vfd_id.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL {
1540                if let Some(vfd) = self.vfds.get_mut(&send_vfd_id.id().into()) {
1541                    vfd.close_remote();
1542                }
1543            }
1544        }
1545        Ok(WlResp::Ok)
1546    }
1547
    /// Reads available data/descriptors from the VFD and queues them for the
    /// guest.
    ///
    /// A fence VFD yields a single `Hup` (it is removed from polling once it
    /// fires). Otherwise each file received alongside the data becomes a new
    /// host-assigned VFD announced via `WlRecv::Vfd` before the data itself is
    /// queued; an empty read with no files is treated as a hangup.
    fn recv(&mut self, vfd_id: u32) -> WlResult<()> {
        let buf = match self.vfds.get_mut(&vfd_id) {
            Some(vfd) => {
                if vfd.is_fence {
                    if let Err(e) = self.wait_ctx.delete(vfd.wait_descriptor().unwrap()) {
                        warn!("failed to remove hungup vfd from poll context: {}", e);
                    }
                    self.in_queue.push_back((vfd_id, WlRecv::Hup));
                    return Ok(());
                } else {
                    vfd.recv(&mut self.in_file_queue)?
                }
            }
            None => return Ok(()),
        };

        if self.in_file_queue.is_empty() && buf.is_empty() {
            self.in_queue.push_back((vfd_id, WlRecv::Hup));
            return Ok(());
        }
        for file in self.in_file_queue.drain(..) {
            let vfd = WlVfd::from_file(self.vm.clone(), file)?;
            if let Some(wait_descriptor) = vfd.wait_descriptor() {
                self.wait_ctx
                    .add(wait_descriptor, self.next_vfd_id)
                    .map_err(WlError::WaitContextAdd)?;
            }
            // Only necessary if we somehow wrap the id counter. The try_insert
            // API would be nicer, but that's currently experimental.
            while self.vfds.contains_key(&self.next_vfd_id) {
                self.next_vfd_id += 1;
            }
            self.vfds.insert(self.next_vfd_id, vfd);
            self.in_queue.push_back((
                vfd_id,
                WlRecv::Vfd {
                    id: self.next_vfd_id,
                },
            ));
            self.next_vfd_id += 1;
        }
        self.in_queue.push_back((vfd_id, WlRecv::Data { buf }));

        Ok(())
    }
1593
    /// Decodes one guest command from `reader` and dispatches it.
    ///
    /// The command type is peeked (not consumed) so each arm can re-read its
    /// full control struct from the start. Unknown commands are logged and
    /// answered with `WlResp::InvalidCommand` rather than failing the worker.
    fn execute(&mut self, reader: &mut Reader) -> WlResult<WlResp> {
        let type_: Le32 = reader.peek_obj::<Le32>().map_err(WlError::ParseDesc)?;
        match type_.into() {
            VIRTIO_WL_CMD_VFD_NEW => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNew>()
                    .map_err(WlError::ParseDesc)?;
                self.new_alloc(ctrl.id.into(), ctrl.flags.into(), ctrl.size.into())
            }
            VIRTIO_WL_CMD_VFD_CLOSE => {
                let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
                self.close(ctrl.id.into())
            }
            VIRTIO_WL_CMD_VFD_SEND => {
                let ctrl = reader
                    .read_obj::<CtrlVfdSend>()
                    .map_err(WlError::ParseDesc)?;
                let foreign_id = false;
                self.send(
                    ctrl.id.into(),
                    ctrl.vfd_count.to_native() as usize,
                    foreign_id,
                    reader,
                )
            }
            #[cfg(feature = "gpu")]
            VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID => {
                let ctrl = reader
                    .read_obj::<CtrlVfdSend>()
                    .map_err(WlError::ParseDesc)?;
                let foreign_id = true;
                self.send(
                    ctrl.id.into(),
                    ctrl.vfd_count.to_native() as usize,
                    foreign_id,
                    reader,
                )
            }
            VIRTIO_WL_CMD_VFD_NEW_CTX => {
                // The unnamed variant uses the empty-string wayland path entry.
                let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
                self.new_context(ctrl.id.into(), "")
            }
            VIRTIO_WL_CMD_VFD_NEW_PIPE => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNew>()
                    .map_err(WlError::ParseDesc)?;
                self.new_pipe(ctrl.id.into(), ctrl.flags.into())
            }
            #[cfg(feature = "gbm")]
            VIRTIO_WL_CMD_VFD_NEW_DMABUF => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNewDmabuf>()
                    .map_err(WlError::ParseDesc)?;
                self.new_dmabuf(
                    ctrl.id.into(),
                    ctrl.width.into(),
                    ctrl.height.into(),
                    ctrl.format.into(),
                )
            }
            #[cfg(feature = "gbm")]
            VIRTIO_WL_CMD_VFD_DMABUF_SYNC => {
                let ctrl = reader
                    .read_obj::<CtrlVfdDmabufSync>()
                    .map_err(WlError::ParseDesc)?;
                self.dmabuf_sync(ctrl.id.into(), ctrl.flags.into())
            }
            VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNewCtxNamed>()
                    .map_err(WlError::ParseDesc)?;
                // The name field is NUL-padded; take bytes up to the first NUL.
                let name_len = ctrl
                    .name
                    .iter()
                    .position(|x| x == &0)
                    .unwrap_or(ctrl.name.len());
                let name =
                    std::str::from_utf8(&ctrl.name[..name_len]).map_err(WlError::InvalidString)?;
                self.new_context(ctrl.id.into(), name)
            }
            op_type => {
                warn!("unexpected command {}", op_type);
                Ok(WlResp::InvalidCommand)
            }
        }
    }
1680
    /// Composes the next guest-bound message from the `in` queue without
    /// consuming it.
    ///
    /// `VfdNew` announcements are delivered while `current_recv_vfd` is unset
    /// or matches the entry's destination; a mismatched destination instead
    /// flushes the in-progress recv (empty data plus the vfd ids collected so
    /// far in `recv_vfds`). Data entries follow the same matching rule, and a
    /// `Hup` entry always yields `VfdHup`.
    fn next_recv(&self) -> Option<WlResp> {
        if let Some(q) = self.in_queue.front() {
            match *q {
                (vfd_id, WlRecv::Vfd { id }) => {
                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                        match self.vfds.get(&id) {
                            Some(vfd) => Some(WlResp::VfdNew {
                                id,
                                flags: vfd.flags(self.use_transition_flags),
                                pfn: self.compute_pfn(&vfd.offset()),
                                size: vfd.size().unwrap_or_default() as u32,
                                resp: false,
                            }),
                            // The vfd was closed in the meantime; announce it
                            // with empty properties.
                            _ => Some(WlResp::VfdNew {
                                id,
                                flags: 0,
                                pfn: 0,
                                size: 0,
                                resp: false,
                            }),
                        }
                    } else {
                        Some(WlResp::VfdRecv {
                            id: self.current_recv_vfd.unwrap(),
                            data: &[],
                            vfds: &self.recv_vfds[..],
                        })
                    }
                }
                (vfd_id, WlRecv::Data { ref buf }) => {
                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                        Some(WlResp::VfdRecv {
                            id: vfd_id,
                            data: &buf[..],
                            vfds: &self.recv_vfds[..],
                        })
                    } else {
                        Some(WlResp::VfdRecv {
                            id: self.current_recv_vfd.unwrap(),
                            data: &[],
                            vfds: &self.recv_vfds[..],
                        })
                    }
                }
                (vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
            }
        } else {
            None
        }
    }
1731
1732    fn pop_recv(&mut self) {
1733        if let Some(q) = self.in_queue.front() {
1734            match *q {
1735                (vfd_id, WlRecv::Vfd { id }) => {
1736                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1737                        self.recv_vfds.push(id);
1738                        self.current_recv_vfd = Some(vfd_id);
1739                    } else {
1740                        self.recv_vfds.clear();
1741                        self.current_recv_vfd = None;
1742                        return;
1743                    }
1744                }
1745                (vfd_id, WlRecv::Data { .. }) => {
1746                    self.recv_vfds.clear();
1747                    self.current_recv_vfd = None;
1748                    if !(self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id)) {
1749                        return;
1750                    }
1751                }
1752                (_, WlRecv::Hup) => {
1753                    self.recv_vfds.clear();
1754                    self.current_recv_vfd = None;
1755                }
1756            }
1757        }
1758        self.in_queue.pop_front();
1759    }
1760
1761    fn compute_pfn(&self, offset: &Option<u64>) -> u64 {
1762        let addr = match (offset, self.address_offset) {
1763            (Some(o), Some(address_offset)) => o + address_offset,
1764            (Some(o), None) => *o,
1765            // without shmem, 0 is the special address for "no_pfn"
1766            (None, Some(_)) => 0,
1767            // with shmem, WL_SHMEM_SIZE is the special address for "no_pfn"
1768            (None, None) => WL_SHMEM_SIZE,
1769        };
1770        addr >> VIRTIO_WL_PFN_SHIFT
1771    }
1772}
1773
/// Error returned by [`process_in_queue`] when messages are still pending but
/// the guest has no available `in` queue descriptors left to receive them.
#[derive(ThisError, Debug, PartialEq, Eq)]
#[error("no descriptors available in queue")]
pub struct DescriptorsExhausted;
1777
/// Handle incoming events and forward them to the VM over the input queue.
///
/// Returns `Err(DescriptorsExhausted)` when there is still buffered state but
/// no available descriptors remain; the caller is expected to stop polling
/// host events until the guest adds more descriptors (see `Worker::run`).
pub fn process_in_queue(
    in_queue: &mut Queue,
    state: &mut WlState,
) -> ::std::result::Result<(), DescriptorsExhausted> {
    // Drain ready host events into `state`'s internal in-queue buffer first.
    state.process_wait_context();

    let mut needs_interrupt = false;
    let mut exhausted_queue = false;
    loop {
        // Peek rather than pop so the chain is only consumed if a message is
        // actually pending.
        let mut desc = if let Some(d) = in_queue.peek() {
            d
        } else {
            exhausted_queue = true;
            break;
        };

        let mut should_pop = false;
        if let Some(in_resp) = state.next_recv() {
            match encode_resp(&mut desc.writer, in_resp) {
                Ok(()) => {
                    should_pop = true;
                }
                Err(e) => {
                    // The descriptor is still returned to the guest below, but
                    // `should_pop` stays false so the message remains buffered
                    // in `state` for a retry with the next descriptor.
                    error!("failed to encode response to descriptor chain: {}", e);
                }
            }
            needs_interrupt = true;
            let desc = desc.pop();
            in_queue.add_used(desc);
        } else {
            break;
        }
        if should_pop {
            state.pop_recv();
        }
    }

    if needs_interrupt {
        in_queue.trigger_interrupt();
    }

    if exhausted_queue {
        Err(DescriptorsExhausted)
    } else {
        Ok(())
    }
}
1826
1827/// Handle messages from the output queue and forward them to the display sever, if necessary.
1828pub fn process_out_queue(out_queue: &mut Queue, state: &mut WlState) {
1829    let mut needs_interrupt = false;
1830    while let Some(mut desc) = out_queue.pop() {
1831        let resp = match state.execute(&mut desc.reader) {
1832            Ok(r) => r,
1833            Err(e) => WlResp::Err(Box::new(e)),
1834        };
1835
1836        match encode_resp(&mut desc.writer, resp) {
1837            Ok(()) => {}
1838            Err(e) => {
1839                error!("failed to encode response to descriptor chain: {}", e);
1840            }
1841        }
1842
1843        out_queue.add_used(desc);
1844        needs_interrupt = true;
1845    }
1846
1847    if needs_interrupt {
1848        out_queue.trigger_interrupt();
1849    }
1850}
1851
/// Per-thread state for the virtio wayland worker's poll loop.
struct Worker {
    // Queue carrying host-to-guest messages (`in`).
    in_queue: Queue,
    // Queue carrying guest-to-host commands (`out`).
    out_queue: Queue,
    // Vfd table and protocol state routed between the queues and host sockets.
    state: WlState,
}
1857
impl Worker {
    /// Builds a worker from activated queues and the `WlState` configuration.
    fn new(
        in_queue: Queue,
        out_queue: Queue,
        wayland_paths: BTreeMap<String, PathBuf>,
        mapper: Box<dyn SharedMemoryMapper>,
        use_transition_flags: bool,
        use_send_vfd_v2: bool,
        resource_bridge: Option<Tube>,
        #[cfg(feature = "gbm")] gralloc: RutabagaGralloc,
        address_offset: Option<u64>,
    ) -> Worker {
        Worker {
            in_queue,
            out_queue,
            state: WlState::new(
                wayland_paths,
                mapper,
                use_transition_flags,
                use_send_vfd_v2,
                resource_bridge,
                #[cfg(feature = "gbm")]
                gralloc,
                address_offset,
            ),
        }
    }

    /// Runs the event loop until `kill_evt` is signaled, multiplexing queue
    /// events and `WlState` host-socket events through one `WaitContext`.
    fn run(&mut self, kill_evt: Event) -> anyhow::Result<()> {
        #[derive(EventToken)]
        enum Token {
            InQueue,
            OutQueue,
            Kill,
            State,
        }

        let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
            (self.in_queue.event(), Token::InQueue),
            (self.out_queue.event(), Token::OutQueue),
            (&kill_evt, Token::Kill),
            (&self.state.wait_ctx, Token::State),
        ])
        .context("failed creating WaitContext")?;

        // Whether `state.wait_ctx` is registered for read events; it is
        // disabled while the guest's `in` queue has no free descriptors to
        // avoid a busy wakeup loop.
        let mut watching_state_ctx = true;
        'wait: loop {
            let events = match wait_ctx.wait() {
                Ok(v) => v,
                Err(e) => {
                    error!("failed waiting for events: {}", e);
                    break;
                }
            };

            for event in &events {
                match event.token {
                    Token::InQueue => {
                        let _ = self.in_queue.event().wait();
                        // The guest made descriptors available again; resume
                        // watching host events so buffered messages can drain
                        // on the next `Token::State` event.
                        if !watching_state_ctx {
                            if let Err(e) =
                                wait_ctx.modify(&self.state.wait_ctx, EventType::Read, Token::State)
                            {
                                error!("Failed to modify wait_ctx descriptor for WlState: {}", e);
                                break;
                            }
                            watching_state_ctx = true;
                        }
                    }
                    Token::OutQueue => {
                        let _ = self.out_queue.event().wait();
                        process_out_queue(&mut self.out_queue, &mut self.state);
                    }
                    Token::Kill => break 'wait,
                    Token::State => {
                        if let Err(DescriptorsExhausted) =
                            process_in_queue(&mut self.in_queue, &mut self.state)
                        {
                            // No descriptors left; stop watching host events
                            // until `Token::InQueue` fires again.
                            if let Err(e) =
                                wait_ctx.modify(&self.state.wait_ctx, EventType::None, Token::State)
                            {
                                error!(
                                    "Failed to stop watching wait_ctx descriptor for WlState: {}",
                                    e
                                );
                                break;
                            }
                            watching_state_ctx = false;
                        }
                    }
                }
            }
        }

        Ok(())
    }
}
1955
/// The virtio wayland device (see the module documentation).
pub struct Wl {
    // Running worker handle; `None` until `activate` and after `virtio_sleep`.
    worker_thread: Option<WorkerThread<BTreeMap<usize, Queue>>>,
    // Maps wayland socket names to their host paths.
    wayland_paths: BTreeMap<String, PathBuf>,
    // Shared memory mapper, taken by the worker during `activate`.
    mapper: Option<Box<dyn SharedMemoryMapper>>,
    // Optional tube handed to `WlState` for resource bridge requests.
    resource_bridge: Option<Tube>,
    // Transport features offered to the guest, combined with wl features.
    base_features: u64,
    // Feature bits acknowledged by the guest via `ack_features`.
    acked_features: u64,
    // Created after sandboxing in `on_device_sandboxed`; taken in `activate`.
    #[cfg(feature = "gbm")]
    gralloc: Option<RutabagaGralloc>,
    // Base added to vfd offsets when computing guest pfns; set from the
    // shared memory region start.
    address_offset: Option<u64>,
}
1967
1968impl Wl {
1969    pub fn new(
1970        base_features: u64,
1971        wayland_paths: BTreeMap<String, PathBuf>,
1972        resource_bridge: Option<Tube>,
1973    ) -> Result<Wl> {
1974        Ok(Wl {
1975            worker_thread: None,
1976            wayland_paths,
1977            mapper: None,
1978            resource_bridge,
1979            base_features,
1980            acked_features: 0,
1981            #[cfg(feature = "gbm")]
1982            gralloc: None,
1983            address_offset: None,
1984        })
1985    }
1986}
1987
impl VirtioDevice for Wl {
    // Raw descriptors that must be kept open across the sandbox transition.
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut keep_rds = Vec::new();

        if let Some(mapper) = &self.mapper {
            if let Some(raw_descriptor) = mapper.as_raw_descriptor() {
                keep_rds.push(raw_descriptor);
            }
        }
        if let Some(resource_bridge) = &self.resource_bridge {
            keep_rds.push(resource_bridge.as_raw_descriptor());
        }
        keep_rds
    }

    #[cfg(feature = "gbm")]
    fn on_device_sandboxed(&mut self) {
        // Gralloc initialization can cause some GPU drivers to create their own threads
        // and that must be done after sandboxing.
        match RutabagaGralloc::new(RutabagaGrallocBackendFlags::new()) {
            Ok(g) => self.gralloc = Some(g),
            Err(e) => {
                // On failure `self.gralloc` stays `None`; `activate` will then
                // panic on its `expect` when taking the gralloc.
                error!("failed to initialize gralloc {:?}", e);
            }
        };
    }

    fn device_type(&self) -> DeviceType {
        DeviceType::Wl
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    // Offered features: the transport base plus the wl-specific bits.
    fn features(&self) -> u64 {
        self.base_features
            | 1 << VIRTIO_WL_F_TRANS_FLAGS
            | 1 << VIRTIO_WL_F_SEND_FENCES
            | 1 << VIRTIO_WL_F_USE_SHMEM
    }

    fn ack_features(&mut self, value: u64) {
        self.acked_features |= value;
    }

    /// Spawns the worker thread that services the `in` and `out` queues.
    fn activate(
        &mut self,
        _mem: GuestMemory,
        _interrupt: Interrupt,
        mut queues: BTreeMap<usize, Queue>,
    ) -> anyhow::Result<()> {
        if queues.len() != QUEUE_SIZES.len() {
            return Err(anyhow!(
                "expected {} queues, got {}",
                QUEUE_SIZES.len(),
                queues.len()
            ));
        }

        let mapper = self.mapper.take().context("missing mapper")?;

        // Translate the acked feature bits into worker configuration.
        let wayland_paths = self.wayland_paths.clone();
        let use_transition_flags = self.acked_features & (1 << VIRTIO_WL_F_TRANS_FLAGS) != 0;
        let use_send_vfd_v2 = self.acked_features & (1 << VIRTIO_WL_F_SEND_FENCES) != 0;
        let use_shmem = self.acked_features & (1 << VIRTIO_WL_F_USE_SHMEM) != 0;
        let resource_bridge = self.resource_bridge.take();
        #[cfg(feature = "gbm")]
        let gralloc = self
            .gralloc
            .take()
            .expect("gralloc already passed to worker");
        // With shmem acked, no extra offset is applied (see `compute_pfn`).
        let address_offset = if !use_shmem {
            self.address_offset
        } else {
            None
        };

        self.worker_thread = Some(WorkerThread::start("v_wl", move |kill_evt| {
            let mut worker = Worker::new(
                // `pop_first` yields queues in index order: 0 (`in`), 1 (`out`).
                queues.pop_first().unwrap().1,
                queues.pop_first().unwrap().1,
                wayland_paths,
                mapper,
                use_transition_flags,
                use_send_vfd_v2,
                resource_bridge,
                #[cfg(feature = "gbm")]
                gralloc,
                address_offset,
            );
            if let Err(e) = worker.run(kill_evt) {
                error!("wl worker failed: {e:#}");
            }
            // Hand the queues back so `virtio_sleep` can collect them.
            BTreeMap::from_iter([worker.in_queue, worker.out_queue].into_iter().enumerate())
        }));

        Ok(())
    }

    fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
        Some(SharedMemoryRegion {
            id: WL_SHMEM_ID,
            length: WL_SHMEM_SIZE,
        })
    }

    fn set_shared_memory_region(&mut self, shmem_region: AddressRange) {
        self.address_offset = Some(shmem_region.start);
    }

    fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
        self.mapper = Some(mapper);
    }

    /// Stops the worker (if running) and returns its queues for later wake.
    fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
        if let Some(worker_thread) = self.worker_thread.take() {
            let queues = worker_thread.stop();
            return Ok(Some(queues));
        }
        Ok(None)
    }

    fn virtio_wake(
        &mut self,
        device_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
    ) -> anyhow::Result<()> {
        match device_state {
            None => Ok(()),
            Some((mem, interrupt, queues)) => {
                // TODO: activate is just what we want at the moment, but we should probably move
                // it into a "start workers" function to make it obvious that it isn't strictly
                // used for activate events.
                self.activate(mem, interrupt, queues)?;
                Ok(())
            }
        }
    }
}