devices/virtio/
wl.rs

1// Copyright 2017 The ChromiumOS Authors
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! This module implements the virtio wayland used by the guest to access the host's wayland server.
6//!
7//! The virtio wayland protocol is done over two queues: `in` and `out`. The `in` queue is used for
8//! sending commands to the guest that are generated by the host, usually messages from the wayland
9//! server. The `out` queue is for commands from the guest, usually requests to allocate shared
10//! memory, open a wayland server connection, or send data over an existing connection.
11//!
12//! Each `WlVfd` represents one virtual file descriptor created by either the guest or the host.
13//! Virtual file descriptors contain actual file descriptors, either a shared memory file descriptor
14//! or a unix domain socket to the wayland server. In the shared memory case, there is also an
15//! associated slot that indicates which hypervisor memory slot the memory is installed into, as
16//! well as a page frame number that the guest can access the memory from.
17//!
18//! The types starting with `Ctrl` are structures representing the virtio wayland protocol "on the
19//! wire." They are decoded and executed in the `execute` function and encoded as some variant of
20//! `WlResp` for responses.
21//!
22//! There is one `WlState` instance that contains every known vfd and the current state of `in`
23//! queue. The `in` queue requires extra state to buffer messages to the guest in case the `in`
24//! queue is already full. The `WlState` also has a control socket necessary to fulfill certain
25//! requests, such as those registering guest memory.
26//!
27//! The `Worker` is responsible for the poll loop over all possible events, encoding/decoding from
28//! the virtio queue, and routing messages in and out of `WlState`. Possible events include the kill
29//! event, available descriptors on the `in` or `out` queue, and incoming data on any vfd's socket.
30
31use std::cell::RefCell;
32use std::collections::btree_map::Entry;
33use std::collections::BTreeMap;
34use std::collections::BTreeSet;
35use std::collections::VecDeque;
36use std::convert::From;
37use std::error::Error as StdError;
38use std::fmt;
39use std::fs::File;
40use std::io;
41use std::io::Read;
42use std::io::Seek;
43use std::io::SeekFrom;
44use std::io::Write;
45use std::mem::size_of;
46#[cfg(feature = "gbm")]
47use std::os::raw::c_uint;
48#[cfg(feature = "gbm")]
49use std::os::raw::c_ulonglong;
50use std::os::unix::net::UnixStream;
51use std::path::Path;
52use std::path::PathBuf;
53use std::rc::Rc;
54use std::result;
55use std::time::Duration;
56
57use anyhow::anyhow;
58use anyhow::Context;
59use base::error;
60#[cfg(feature = "gbm")]
61use base::ioctl_iow_nr;
62use base::ioctl_iowr_nr;
63use base::ioctl_with_ref;
64use base::linux::SharedMemoryLinux;
65use base::pagesize;
66use base::pipe;
67use base::round_up_to_page_size;
68use base::unix::FileFlags;
69use base::warn;
70use base::AsRawDescriptor;
71use base::Error;
72use base::Event;
73use base::EventToken;
74use base::EventType;
75#[cfg(feature = "gpu")]
76use base::IntoRawDescriptor;
77#[cfg(feature = "gbm")]
78use base::MemoryMappingBuilder;
79#[cfg(feature = "gbm")]
80use base::MmapError;
81use base::Protection;
82use base::RawDescriptor;
83use base::Result;
84use base::SafeDescriptor;
85use base::ScmSocket;
86use base::SharedMemory;
87use base::Tube;
88use base::TubeError;
89use base::VolatileMemoryError;
90use base::WaitContext;
91use base::WorkerThread;
92use data_model::Le32;
93use data_model::Le64;
94use hypervisor::MemCacheType;
95#[cfg(feature = "gbm")]
96use libc::EBADF;
97#[cfg(feature = "gbm")]
98use libc::EINVAL;
99#[cfg(feature = "gbm")]
100use libc::ENOSYS;
101use remain::sorted;
102use resources::address_allocator::AddressAllocator;
103use resources::AddressRange;
104use resources::Alloc;
105#[cfg(feature = "gbm")]
106use rutabaga_gfx::DrmFormat;
107#[cfg(feature = "gbm")]
108use rutabaga_gfx::ImageAllocationInfo;
109#[cfg(feature = "gbm")]
110use rutabaga_gfx::ImageMemoryRequirements;
111#[cfg(feature = "gbm")]
112use rutabaga_gfx::RutabagaDescriptor;
113#[cfg(feature = "gbm")]
114use rutabaga_gfx::RutabagaError;
115#[cfg(feature = "gbm")]
116use rutabaga_gfx::RutabagaGralloc;
117#[cfg(feature = "gbm")]
118use rutabaga_gfx::RutabagaGrallocBackendFlags;
119#[cfg(feature = "gbm")]
120use rutabaga_gfx::RutabagaGrallocFlags;
121#[cfg(feature = "gbm")]
122use rutabaga_gfx::RutabagaIntoRawDescriptor;
123#[cfg(feature = "gbm")]
124use rutabaga_gfx::RUTABAGA_MAP_CACHE_CACHED;
125#[cfg(feature = "gbm")]
126use rutabaga_gfx::RUTABAGA_MAP_CACHE_MASK;
127use static_assertions::const_assert_eq;
128use thiserror::Error as ThisError;
129use vm_control::VmMemorySource;
130use vm_memory::GuestMemory;
131use vm_memory::GuestMemoryError;
132use zerocopy::FromBytes;
133use zerocopy::Immutable;
134use zerocopy::IntoBytes;
135use zerocopy::KnownLayout;
136
137#[cfg(feature = "gpu")]
138use super::resource_bridge::get_resource_info;
139#[cfg(feature = "gpu")]
140use super::resource_bridge::BufferInfo;
141#[cfg(feature = "gpu")]
142use super::resource_bridge::ResourceBridgeError;
143#[cfg(feature = "gpu")]
144use super::resource_bridge::ResourceInfo;
145#[cfg(feature = "gpu")]
146use super::resource_bridge::ResourceRequest;
147use super::DeviceType;
148use super::Interrupt;
149use super::Queue;
150use super::Reader;
151use super::SharedMemoryMapper;
152use super::SharedMemoryRegion;
153use super::VirtioDevice;
154use super::Writer;
155use crate::virtio::device_constants::wl::VIRTIO_WL_F_SEND_FENCES;
156use crate::virtio::device_constants::wl::VIRTIO_WL_F_TRANS_FLAGS;
157use crate::virtio::device_constants::wl::VIRTIO_WL_F_USE_SHMEM;
158
159const QUEUE_SIZE: u16 = 256;
160const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];
161
162const VIRTWL_SEND_MAX_ALLOCS: usize = 28;
163const VIRTIO_WL_CMD_VFD_NEW: u32 = 256;
164const VIRTIO_WL_CMD_VFD_CLOSE: u32 = 257;
165const VIRTIO_WL_CMD_VFD_SEND: u32 = 258;
166const VIRTIO_WL_CMD_VFD_RECV: u32 = 259;
167const VIRTIO_WL_CMD_VFD_NEW_CTX: u32 = 260;
168const VIRTIO_WL_CMD_VFD_NEW_PIPE: u32 = 261;
169const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
170#[cfg(feature = "gbm")]
171const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
172#[cfg(feature = "gbm")]
173const VIRTIO_WL_CMD_VFD_DMABUF_SYNC: u32 = 264;
174#[cfg(feature = "gpu")]
175const VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: u32 = 265;
176const VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED: u32 = 266;
177const VIRTIO_WL_RESP_OK: u32 = 4096;
178const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
179#[cfg(feature = "gbm")]
180const VIRTIO_WL_RESP_VFD_NEW_DMABUF: u32 = 4098;
181const VIRTIO_WL_RESP_ERR: u32 = 4352;
182const VIRTIO_WL_RESP_OUT_OF_MEMORY: u32 = 4353;
183const VIRTIO_WL_RESP_INVALID_ID: u32 = 4354;
184const VIRTIO_WL_RESP_INVALID_TYPE: u32 = 4355;
185const VIRTIO_WL_RESP_INVALID_FLAGS: u32 = 4356;
186const VIRTIO_WL_RESP_INVALID_CMD: u32 = 4357;
187const VIRTIO_WL_VFD_WRITE: u32 = 0x1;
188const VIRTIO_WL_VFD_READ: u32 = 0x2;
189const VIRTIO_WL_VFD_MAP: u32 = 0x2;
190const VIRTIO_WL_VFD_CONTROL: u32 = 0x4;
191const VIRTIO_WL_VFD_FENCE: u32 = 0x8;
192
193const NEXT_VFD_ID_BASE: u32 = 0x40000000;
194const VFD_ID_HOST_MASK: u32 = NEXT_VFD_ID_BASE;
195// Each in-vq buffer is one page, so we need to leave space for the control header and the maximum
196// number of allocs.
197const IN_BUFFER_LEN: usize =
198    0x1000 - size_of::<CtrlVfdRecv>() - VIRTWL_SEND_MAX_ALLOCS * size_of::<Le32>();
199
200#[cfg(feature = "gbm")]
201const VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK: u32 = 0x7;
202
203#[cfg(feature = "gbm")]
204const DMA_BUF_IOCTL_BASE: c_uint = 0x62;
205#[cfg(feature = "gbm")]
206const DMA_BUF_SYNC_WRITE: c_uint = 0x2;
207#[cfg(feature = "gbm")]
208const DMA_BUF_SYNC_END: c_uint = 0x4;
209
210#[cfg(feature = "gbm")]
211#[repr(C)]
212#[derive(Copy, Clone)]
213struct dma_buf_sync {
214    flags: c_ulonglong,
215}
216
217#[cfg(feature = "gbm")]
218ioctl_iow_nr!(DMA_BUF_IOCTL_SYNC, DMA_BUF_IOCTL_BASE, 0, dma_buf_sync);
219
220#[repr(C)]
221#[derive(Copy, Clone, Default)]
222struct sync_file_info {
223    name: [u8; 32],
224    status: i32,
225    flags: u32,
226    num_fences: u32,
227    pad: u32,
228    sync_fence_info: u64,
229}
230
231ioctl_iowr_nr!(SYNC_IOC_FILE_INFO, 0x3e, 4, sync_file_info);
232
233fn is_fence(f: &File) -> bool {
234    let info = sync_file_info::default();
235    // SAFETY:
236    // Safe as f is a valid file
237    unsafe { ioctl_with_ref(f, SYNC_IOC_FILE_INFO, &info) == 0 }
238}
239
240#[cfg(feature = "gbm")]
241#[derive(Debug, Default)]
242struct GpuMemoryPlaneDesc {
243    stride: u32,
244    offset: u32,
245}
246
247#[cfg(feature = "gbm")]
248#[derive(Debug, Default)]
249struct GpuMemoryDesc {
250    planes: [GpuMemoryPlaneDesc; 3],
251}
252
253const VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL: u32 = 0;
254const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU: u32 = 1;
255const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE: u32 = 2;
256const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE: u32 = 3;
257
258const VIRTIO_WL_PFN_SHIFT: u32 = 12;
259
260fn encode_vfd_new(
261    writer: &mut Writer,
262    resp: bool,
263    vfd_id: u32,
264    flags: u32,
265    pfn: u64,
266    size: u32,
267) -> WlResult<()> {
268    let ctrl_vfd_new = CtrlVfdNew {
269        hdr: CtrlHeader {
270            type_: Le32::from(if resp {
271                VIRTIO_WL_RESP_VFD_NEW
272            } else {
273                VIRTIO_WL_CMD_VFD_NEW
274            }),
275            flags: Le32::from(0),
276        },
277        id: Le32::from(vfd_id),
278        flags: Le32::from(flags),
279        pfn: Le64::from(pfn),
280        size: Le32::from(size),
281        padding: Default::default(),
282    };
283
284    writer
285        .write_obj(ctrl_vfd_new)
286        .map_err(WlError::WriteResponse)
287}
288
289#[cfg(feature = "gbm")]
290fn encode_vfd_new_dmabuf(
291    writer: &mut Writer,
292    vfd_id: u32,
293    flags: u32,
294    pfn: u64,
295    size: u32,
296    desc: GpuMemoryDesc,
297) -> WlResult<()> {
298    let ctrl_vfd_new_dmabuf = CtrlVfdNewDmabuf {
299        hdr: CtrlHeader {
300            type_: Le32::from(VIRTIO_WL_RESP_VFD_NEW_DMABUF),
301            flags: Le32::from(0),
302        },
303        id: Le32::from(vfd_id),
304        flags: Le32::from(flags),
305        pfn: Le64::from(pfn),
306        size: Le32::from(size),
307        width: Le32::from(0),
308        height: Le32::from(0),
309        format: Le32::from(0),
310        stride0: Le32::from(desc.planes[0].stride),
311        stride1: Le32::from(desc.planes[1].stride),
312        stride2: Le32::from(desc.planes[2].stride),
313        offset0: Le32::from(desc.planes[0].offset),
314        offset1: Le32::from(desc.planes[1].offset),
315        offset2: Le32::from(desc.planes[2].offset),
316    };
317
318    writer
319        .write_obj(ctrl_vfd_new_dmabuf)
320        .map_err(WlError::WriteResponse)
321}
322
323fn encode_vfd_recv(writer: &mut Writer, vfd_id: u32, data: &[u8], vfd_ids: &[u32]) -> WlResult<()> {
324    let ctrl_vfd_recv = CtrlVfdRecv {
325        hdr: CtrlHeader {
326            type_: Le32::from(VIRTIO_WL_CMD_VFD_RECV),
327            flags: Le32::from(0),
328        },
329        id: Le32::from(vfd_id),
330        vfd_count: Le32::from(vfd_ids.len() as u32),
331    };
332    writer
333        .write_obj(ctrl_vfd_recv)
334        .map_err(WlError::WriteResponse)?;
335
336    for &recv_vfd_id in vfd_ids.iter() {
337        writer
338            .write_obj(Le32::from(recv_vfd_id))
339            .map_err(WlError::WriteResponse)?;
340    }
341
342    writer.write_all(data).map_err(WlError::WriteResponse)
343}
344
345fn encode_vfd_hup(writer: &mut Writer, vfd_id: u32) -> WlResult<()> {
346    let ctrl_vfd_new = CtrlVfd {
347        hdr: CtrlHeader {
348            type_: Le32::from(VIRTIO_WL_CMD_VFD_HUP),
349            flags: Le32::from(0),
350        },
351        id: Le32::from(vfd_id),
352    };
353
354    writer
355        .write_obj(ctrl_vfd_new)
356        .map_err(WlError::WriteResponse)
357}
358
359fn encode_resp(writer: &mut Writer, resp: WlResp) -> WlResult<()> {
360    match resp {
361        WlResp::VfdNew {
362            id,
363            flags,
364            pfn,
365            size,
366            resp,
367        } => encode_vfd_new(writer, resp, id, flags, pfn, size),
368        #[cfg(feature = "gbm")]
369        WlResp::VfdNewDmabuf {
370            id,
371            flags,
372            pfn,
373            size,
374            desc,
375        } => encode_vfd_new_dmabuf(writer, id, flags, pfn, size, desc),
376        WlResp::VfdRecv { id, data, vfds } => encode_vfd_recv(writer, id, data, vfds),
377        WlResp::VfdHup { id } => encode_vfd_hup(writer, id),
378        r => writer
379            .write_obj(Le32::from(r.get_code()))
380            .map_err(WlError::WriteResponse),
381    }
382}
383
384#[allow(dead_code)]
385#[sorted]
386#[derive(ThisError, Debug)]
387enum WlError {
388    #[error("overflow in calculation")]
389    CheckedOffset,
390    #[error("failed to synchronize DMABuf access: {0}")]
391    DmabufSync(io::Error),
392    #[error("failed to create shared memory from descriptor: {0}")]
393    FromSharedMemory(Error),
394    #[error("failed to get seals: {0}")]
395    GetSeals(Error),
396    #[error("gralloc error: {0}")]
397    #[cfg(feature = "gbm")]
398    GrallocError(#[from] RutabagaError),
399    #[error("access violation in guest memory: {0}")]
400    GuestMemory(#[from] GuestMemoryError),
401    #[error("invalid string: {0}")]
402    InvalidString(std::str::Utf8Error),
403    #[error("failed to create shared memory allocation: {0}")]
404    NewAlloc(Error),
405    #[error("failed to create pipe: {0}")]
406    NewPipe(Error),
407    #[error("error parsing descriptor: {0}")]
408    ParseDesc(io::Error),
409    #[error("failed to read a pipe: {0}")]
410    ReadPipe(io::Error),
411    #[error("failed to recv on a socket: {0}")]
412    RecvVfd(io::Error),
413    #[error("failed to send on a socket: {0}")]
414    SendVfd(io::Error),
415    #[error("shmem mapper failure: {0}")]
416    ShmemMapperError(anyhow::Error),
417    #[error("failed to connect socket: {0}")]
418    SocketConnect(io::Error),
419    #[error("failed to set socket as non-blocking: {0}")]
420    SocketNonBlock(io::Error),
421    #[error("unknown socket name: {0}")]
422    UnknownSocketName(String),
423    #[error("invalid response from parent VM")]
424    VmBadResponse,
425    #[error("failed to control parent VM: {0}")]
426    VmControl(TubeError),
427    #[error("access violating in guest volatile memory: {0}")]
428    VolatileMemory(#[from] VolatileMemoryError),
429    #[error("failed to listen to descriptor on wait context: {0}")]
430    WaitContextAdd(Error),
431    #[error("failed to write to a pipe: {0}")]
432    WritePipe(io::Error),
433    #[error("failed to write response: {0}")]
434    WriteResponse(io::Error),
435}
436
437type WlResult<T> = result::Result<T, WlError>;
438
439pub const WL_SHMEM_ID: u8 = 0;
440pub const WL_SHMEM_SIZE: u64 = 1 << 32;
441
442struct VmRequesterState {
443    mapper: Box<dyn SharedMemoryMapper>,
444    #[cfg(feature = "gbm")]
445    gralloc: RutabagaGralloc,
446
447    // Allocator for shm address space
448    address_allocator: AddressAllocator,
449
450    // Map of existing mappings in the shm address space
451    allocs: BTreeMap<u64 /* offset */, Alloc>,
452
453    // The id for the next shmem allocation
454    next_alloc: usize,
455}
456
457#[derive(Clone)]
458struct VmRequester {
459    state: Rc<RefCell<VmRequesterState>>,
460}
461
462// The following are wrappers to avoid base dependencies in the rutabaga crate
463#[cfg(feature = "gbm")]
464fn to_safe_descriptor(r: RutabagaDescriptor) -> SafeDescriptor {
465    // SAFETY:
466    // Safe because we own the SafeDescriptor at this point.
467    unsafe { base::FromRawDescriptor::from_raw_descriptor(r.into_raw_descriptor()) }
468}
469
470impl VmRequester {
471    fn new(
472        mapper: Box<dyn SharedMemoryMapper>,
473        #[cfg(feature = "gbm")] gralloc: RutabagaGralloc,
474    ) -> VmRequester {
475        VmRequester {
476            state: Rc::new(RefCell::new(VmRequesterState {
477                mapper,
478                #[cfg(feature = "gbm")]
479                gralloc,
480                address_allocator: AddressAllocator::new(
481                    AddressRange::from_start_and_size(0, WL_SHMEM_SIZE).unwrap(),
482                    Some(pagesize() as u64),
483                    None,
484                )
485                .expect("failed to create allocator"),
486                allocs: BTreeMap::new(),
487                next_alloc: 0,
488            })),
489        }
490    }
491
492    fn unregister_memory(&self, offset: u64) -> WlResult<()> {
493        let mut state = self.state.borrow_mut();
494        state
495            .mapper
496            .remove_mapping(offset)
497            .map_err(WlError::ShmemMapperError)?;
498        let alloc = state
499            .allocs
500            .remove(&offset)
501            .context("unknown offset")
502            .map_err(WlError::ShmemMapperError)?;
503        state
504            .address_allocator
505            .release(alloc)
506            .expect("corrupt address space");
507        Ok(())
508    }
509
510    #[cfg(feature = "gbm")]
511    fn allocate_and_register_gpu_memory(
512        &self,
513        width: u32,
514        height: u32,
515        format: u32,
516    ) -> WlResult<(u64, SafeDescriptor, ImageMemoryRequirements)> {
517        let mut state = self.state.borrow_mut();
518
519        let img = ImageAllocationInfo {
520            width,
521            height,
522            drm_format: DrmFormat::from(format),
523            // Linear layout is a requirement as virtio wayland guest expects
524            // this for CPU access to the buffer. Scanout and texturing are
525            // optional as the consumer (wayland compositor) is expected to
526            // fall-back to a less efficient mechanisms for presentation if
527            // neccesary. In practice, linear buffers for commonly used formats
528            // will also support scanout and texturing.
529            flags: RutabagaGrallocFlags::empty().use_linear(true),
530        };
531
532        let reqs = state
533            .gralloc
534            .get_image_memory_requirements(img)
535            .map_err(WlError::GrallocError)?;
536        let handle = state
537            .gralloc
538            .allocate_memory(reqs)
539            .map_err(WlError::GrallocError)?;
540        drop(state);
541
542        let safe_descriptor = to_safe_descriptor(handle.os_handle);
543        self.register_memory(
544            safe_descriptor
545                .try_clone()
546                .context("failed to dup gfx handle")
547                .map_err(WlError::ShmemMapperError)?,
548            reqs.size,
549            Protection::read_write(),
550        )
551        .map(|info| (info, safe_descriptor, reqs))
552    }
553
554    fn register_shmem(&self, shm: &SharedMemory) -> WlResult<u64> {
555        let prot = match FileFlags::from_file(shm) {
556            Ok(FileFlags::Read) => Protection::read(),
557            Ok(FileFlags::Write) => Protection::write(),
558            Ok(FileFlags::ReadWrite) => {
559                let seals = shm.get_seals().map_err(WlError::GetSeals)?;
560                if seals.write_seal() {
561                    Protection::read()
562                } else {
563                    Protection::read_write()
564                }
565            }
566            Err(e) => {
567                return Err(WlError::ShmemMapperError(anyhow!(
568                    "failed to get file descriptor flags with error: {:?}",
569                    e
570                )))
571            }
572        };
573        self.register_memory(
574            SafeDescriptor::try_from(shm as &dyn AsRawDescriptor)
575                .context("failed to create safe descriptor")
576                .map_err(WlError::ShmemMapperError)?,
577            shm.size(),
578            prot,
579        )
580    }
581
582    fn register_memory(
583        &self,
584        descriptor: SafeDescriptor,
585        size: u64,
586        prot: Protection,
587    ) -> WlResult<u64> {
588        let mut state = self.state.borrow_mut();
589        let size = round_up_to_page_size(size as usize) as u64;
590
591        let source = VmMemorySource::Descriptor {
592            descriptor,
593            offset: 0,
594            size,
595        };
596        let alloc = Alloc::Anon(state.next_alloc);
597        state.next_alloc += 1;
598        let offset = state
599            .address_allocator
600            .allocate(size, alloc, "virtio-wl".to_owned())
601            .context("failed to allocate offset")
602            .map_err(WlError::ShmemMapperError)?;
603
604        match state
605            .mapper
606            .add_mapping(source, offset, prot, MemCacheType::CacheCoherent)
607        {
608            Ok(()) => {
609                state.allocs.insert(offset, alloc);
610                Ok(offset)
611            }
612            Err(e) => {
613                // We just allocated it ourselves, it must exist.
614                state
615                    .address_allocator
616                    .release(alloc)
617                    .expect("corrupt address space");
618                Err(WlError::ShmemMapperError(e))
619            }
620        }
621    }
622}
623
624#[repr(C)]
625#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
626struct CtrlHeader {
627    type_: Le32,
628    flags: Le32,
629}
630
631#[repr(C)]
632#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
633struct CtrlVfdNew {
634    hdr: CtrlHeader,
635    id: Le32,
636    flags: Le32,
637    pfn: Le64,
638    size: Le32,
639    padding: Le32,
640}
641
642#[repr(C)]
643#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
644struct CtrlVfdNewCtxNamed {
645    hdr: CtrlHeader,
646    id: Le32,
647    flags: Le32, // Ignored.
648    pfn: Le64,   // Ignored.
649    size: Le32,  // Ignored.
650    name: [u8; 32],
651    _pad: u32,
652}
653const_assert_eq!(size_of::<CtrlVfdNewCtxNamed>(), 64);
654
655#[repr(C)]
656#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
657#[cfg(feature = "gbm")]
658struct CtrlVfdNewDmabuf {
659    hdr: CtrlHeader,
660    id: Le32,
661    flags: Le32,
662    pfn: Le64,
663    size: Le32,
664    width: Le32,
665    height: Le32,
666    format: Le32,
667    stride0: Le32,
668    stride1: Le32,
669    stride2: Le32,
670    offset0: Le32,
671    offset1: Le32,
672    offset2: Le32,
673}
674
675#[cfg(feature = "gbm")]
676#[repr(C)]
677#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
678struct CtrlVfdDmabufSync {
679    hdr: CtrlHeader,
680    id: Le32,
681    flags: Le32,
682}
683
684#[repr(C)]
685#[derive(Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]
686struct CtrlVfdRecv {
687    hdr: CtrlHeader,
688    id: Le32,
689    vfd_count: Le32,
690}
691
692#[repr(C)]
693#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
694struct CtrlVfd {
695    hdr: CtrlHeader,
696    id: Le32,
697}
698
699#[repr(C)]
700#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
701struct CtrlVfdSend {
702    hdr: CtrlHeader,
703    id: Le32,
704    vfd_count: Le32,
705    // Remainder is an array of vfd_count IDs followed by data.
706}
707
708#[repr(C)]
709#[derive(Copy, Clone, Default, FromBytes, Immutable, IntoBytes, KnownLayout)]
710struct CtrlVfdSendVfd {
711    kind: Le32,
712    id: Le32,
713}
714
715#[repr(C)]
716#[derive(Copy, Clone, FromBytes, Immutable, KnownLayout)]
717union CtrlVfdSendVfdV2Payload {
718    id: Le32,
719    seqno: Le64,
720}
721
722#[repr(C)]
723#[derive(Copy, Clone, FromBytes, Immutable, KnownLayout)]
724struct CtrlVfdSendVfdV2 {
725    kind: Le32,
726    payload: CtrlVfdSendVfdV2Payload,
727}
728
729impl CtrlVfdSendVfdV2 {
730    fn id(&self) -> Le32 {
731        assert!(
732            self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL
733                || self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
734        );
735        // SAFETY: trivially safe given we assert kind
736        unsafe { self.payload.id }
737    }
738    #[cfg(feature = "gpu")]
739    fn seqno(&self) -> Le64 {
740        assert!(self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE);
741        // SAFETY: trivially safe given we assert kind
742        unsafe { self.payload.seqno }
743    }
744}
745
746#[derive(Debug)]
747#[allow(dead_code)]
748enum WlResp<'a> {
749    Ok,
750    VfdNew {
751        id: u32,
752        flags: u32,
753        pfn: u64,
754        size: u32,
755        // The VfdNew variant can be either a response or a command depending on this `resp`. This
756        // is important for the `get_code` method.
757        resp: bool,
758    },
759    #[cfg(feature = "gbm")]
760    VfdNewDmabuf {
761        id: u32,
762        flags: u32,
763        pfn: u64,
764        size: u32,
765        desc: GpuMemoryDesc,
766    },
767    VfdRecv {
768        id: u32,
769        data: &'a [u8],
770        vfds: &'a [u32],
771    },
772    VfdHup {
773        id: u32,
774    },
775    Err(Box<dyn StdError>),
776    OutOfMemory,
777    InvalidId,
778    InvalidType,
779    InvalidFlags,
780    InvalidCommand,
781}
782
783impl WlResp<'_> {
784    fn get_code(&self) -> u32 {
785        match *self {
786            WlResp::Ok => VIRTIO_WL_RESP_OK,
787            WlResp::VfdNew { resp, .. } => {
788                if resp {
789                    VIRTIO_WL_RESP_VFD_NEW
790                } else {
791                    VIRTIO_WL_CMD_VFD_NEW
792                }
793            }
794            #[cfg(feature = "gbm")]
795            WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
796            WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
797            WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
798            WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
799            WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
800            WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
801            WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
802            WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
803            WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
804        }
805    }
806}
807
808#[derive(Default)]
809struct WlVfd {
810    socket: Option<ScmSocket<UnixStream>>,
811    guest_shared_memory: Option<SharedMemory>,
812    remote_pipe: Option<File>,
813    local_pipe: Option<(u32 /* flags */, File)>,
814    slot: Option<(u64 /* offset */, VmRequester)>,
815    #[cfg(feature = "gbm")]
816    is_dmabuf: bool,
817    #[cfg(feature = "gbm")]
818    map_info: u32,
819    fence: Option<File>,
820    is_fence: bool,
821}
822
823impl fmt::Debug for WlVfd {
824    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
825        write!(f, "WlVfd {{")?;
826        if let Some(s) = &self.socket {
827            write!(f, " socket: {}", s.as_raw_descriptor())?;
828        }
829        if let Some((offset, _)) = &self.slot {
830            write!(f, " offset: {offset}")?;
831        }
832        if let Some(s) = &self.remote_pipe {
833            write!(f, " remote: {}", s.as_raw_descriptor())?;
834        }
835        if let Some((_, s)) = &self.local_pipe {
836            write!(f, " local: {}", s.as_raw_descriptor())?;
837        }
838        write!(f, " }}")
839    }
840}
841
842#[cfg(feature = "gbm")]
843fn flush_shared_memory(shared_memory: &SharedMemory) -> Result<()> {
844    let mmap = match MemoryMappingBuilder::new(shared_memory.size as usize)
845        .from_shared_memory(shared_memory)
846        .build()
847    {
848        Ok(v) => v,
849        Err(_) => return Err(Error::new(EINVAL)),
850    };
851    if let Err(err) = mmap.flush_all() {
852        base::error!("failed to flush shared memory: {}", err);
853        return match err {
854            MmapError::NotImplemented(_) => Err(Error::new(ENOSYS)),
855            _ => Err(Error::new(EINVAL)),
856        };
857    }
858    Ok(())
859}
860
861impl WlVfd {
862    fn connect<P: AsRef<Path>>(path: P) -> WlResult<WlVfd> {
863        let socket = UnixStream::connect(path).map_err(WlError::SocketConnect)?;
864        let mut vfd = WlVfd::default();
865        vfd.socket = Some(socket.try_into().map_err(WlError::SocketConnect)?);
866        Ok(vfd)
867    }
868
869    fn allocate(vm: VmRequester, size: u64) -> WlResult<WlVfd> {
870        let size_page_aligned = round_up_to_page_size(size as usize) as u64;
871        let vfd_shm =
872            SharedMemory::new("virtwl_alloc", size_page_aligned).map_err(WlError::NewAlloc)?;
873
874        let offset = vm.register_shmem(&vfd_shm)?;
875
876        let mut vfd = WlVfd::default();
877        vfd.guest_shared_memory = Some(vfd_shm);
878        vfd.slot = Some((offset, vm));
879        Ok(vfd)
880    }
881
882    #[cfg(feature = "gbm")]
883    fn dmabuf(
884        vm: VmRequester,
885        width: u32,
886        height: u32,
887        format: u32,
888    ) -> WlResult<(WlVfd, GpuMemoryDesc)> {
889        let (offset, desc, reqs) = vm.allocate_and_register_gpu_memory(width, height, format)?;
890        let mut vfd = WlVfd::default();
891        let vfd_shm =
892            SharedMemory::from_safe_descriptor(desc, reqs.size).map_err(WlError::NewAlloc)?;
893
894        let mut desc = GpuMemoryDesc::default();
895        for i in 0..3 {
896            desc.planes[i] = GpuMemoryPlaneDesc {
897                stride: reqs.strides[i],
898                offset: reqs.offsets[i],
899            }
900        }
901
902        vfd.guest_shared_memory = Some(vfd_shm);
903        vfd.slot = Some((offset, vm));
904        vfd.is_dmabuf = true;
905        vfd.map_info = reqs.map_info;
906        Ok((vfd, desc))
907    }
908
909    #[cfg(feature = "gbm")]
910    fn dmabuf_sync(&self, flags: u32) -> WlResult<()> {
911        if !self.is_dmabuf {
912            return Err(WlError::DmabufSync(io::Error::from_raw_os_error(EINVAL)));
913        }
914
915        match &self.guest_shared_memory {
916            Some(descriptor) => {
917                let sync = dma_buf_sync {
918                    flags: flags as u64,
919                };
920                // SAFETY:
921                // Safe as descriptor is a valid dmabuf and incorrect flags will return an error.
922                if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC, &sync) } < 0 {
923                    return Err(WlError::DmabufSync(io::Error::last_os_error()));
924                }
925
926                // virtio-wl kernel driver always maps dmabufs with WB memory type, regardless of
927                // the host memory type (which is wrong). However, to avoid changing the protocol,
928                // assume that all guest writes are cached and ensure clflush-like ops on all mapped
929                // cachelines if the host mapping is not cached.
930                const END_WRITE_MASK: u32 = DMA_BUF_SYNC_WRITE | DMA_BUF_SYNC_END;
931                if (flags & END_WRITE_MASK) == END_WRITE_MASK
932                    && (self.map_info & RUTABAGA_MAP_CACHE_MASK) != RUTABAGA_MAP_CACHE_CACHED
933                {
934                    if let Err(err) = flush_shared_memory(descriptor) {
935                        base::warn!("failed to flush cached dmabuf mapping: {:?}", err);
936                        return Err(WlError::DmabufSync(io::Error::from_raw_os_error(
937                            err.errno(),
938                        )));
939                    }
940                }
941                Ok(())
942            }
943            None => Err(WlError::DmabufSync(io::Error::from_raw_os_error(EBADF))),
944        }
945    }
946
947    fn pipe_remote_read_local_write() -> WlResult<WlVfd> {
948        let (read_pipe, write_pipe) = pipe().map_err(WlError::NewPipe)?;
949        let mut vfd = WlVfd::default();
950        vfd.remote_pipe = Some(read_pipe);
951        vfd.local_pipe = Some((VIRTIO_WL_VFD_WRITE, write_pipe));
952        Ok(vfd)
953    }
954
955    fn pipe_remote_write_local_read() -> WlResult<WlVfd> {
956        let (read_pipe, write_pipe) = pipe().map_err(WlError::NewPipe)?;
957        let mut vfd = WlVfd::default();
958        vfd.remote_pipe = Some(write_pipe);
959        vfd.local_pipe = Some((VIRTIO_WL_VFD_READ, read_pipe));
960        Ok(vfd)
961    }
962
963    fn from_file(vm: VmRequester, mut descriptor: File) -> WlResult<WlVfd> {
964        // We need to determine if the given file is more like shared memory or a pipe/socket. A
965        // quick and easy check is to seek to the end of the file. If it works we assume it's not a
966        // pipe/socket because those have no end. We can even use that seek location as an indicator
967        // for how big the shared memory chunk to map into guest memory is. If seeking to the end
968        // fails, we assume it's a socket or pipe with read/write semantics.
969        if descriptor.seek(SeekFrom::End(0)).is_ok() {
970            let shm = SharedMemory::from_file(descriptor).map_err(WlError::FromSharedMemory)?;
971            let offset = vm.register_shmem(&shm)?;
972
973            let mut vfd = WlVfd::default();
974            vfd.guest_shared_memory = Some(shm);
975            vfd.slot = Some((offset, vm));
976            Ok(vfd)
977        } else if is_fence(&descriptor) {
978            let mut vfd = WlVfd::default();
979            vfd.is_fence = true;
980            vfd.fence = Some(descriptor);
981            Ok(vfd)
982        } else {
983            let flags = match FileFlags::from_file(&descriptor) {
984                Ok(FileFlags::Read) => VIRTIO_WL_VFD_READ,
985                Ok(FileFlags::Write) => VIRTIO_WL_VFD_WRITE,
986                Ok(FileFlags::ReadWrite) => VIRTIO_WL_VFD_READ | VIRTIO_WL_VFD_WRITE,
987                _ => 0,
988            };
989            let mut vfd = WlVfd::default();
990            vfd.local_pipe = Some((flags, descriptor));
991            Ok(vfd)
992        }
993    }
994
995    fn flags(&self, use_transition_flags: bool) -> u32 {
996        let mut flags = 0;
997        if use_transition_flags {
998            if self.socket.is_some() {
999                flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
1000            }
1001            if let Some((f, _)) = self.local_pipe {
1002                flags |= f;
1003            }
1004            if self.is_fence {
1005                flags |= VIRTIO_WL_VFD_FENCE;
1006            }
1007        } else {
1008            if self.socket.is_some() {
1009                flags |= VIRTIO_WL_VFD_CONTROL;
1010            }
1011            if self.slot.is_some() {
1012                flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP
1013            }
1014        }
1015        flags
1016    }
1017
1018    // Offset within the shared memory region this VFD was mapped at.
1019    fn offset(&self) -> Option<u64> {
1020        self.slot.as_ref().map(|s| s.0)
1021    }
1022
1023    // Size in bytes of the shared memory VFD.
1024    fn size(&self) -> Option<u64> {
1025        self.guest_shared_memory.as_ref().map(|shm| shm.size())
1026    }
1027
1028    // The descriptor that gets sent if this VFD is sent over a socket.
1029    fn send_descriptor(&self) -> Option<RawDescriptor> {
1030        self.guest_shared_memory
1031            .as_ref()
1032            .map(|shm| shm.as_raw_descriptor())
1033            .or(self.socket.as_ref().map(|s| s.as_raw_descriptor()))
1034            .or(self.remote_pipe.as_ref().map(|p| p.as_raw_descriptor()))
1035            .or(self.fence.as_ref().map(|f| f.as_raw_descriptor()))
1036    }
1037
1038    // The FD that is used for polling for events on this VFD.
1039    fn wait_descriptor(&self) -> Option<&dyn AsRawDescriptor> {
1040        self.socket
1041            .as_ref()
1042            .map(|s| s as &dyn AsRawDescriptor)
1043            .or_else(|| {
1044                self.local_pipe
1045                    .as_ref()
1046                    .map(|(_, p)| p as &dyn AsRawDescriptor)
1047            })
1048            .or_else(|| self.fence.as_ref().map(|f| f as &dyn AsRawDescriptor))
1049    }
1050
1051    // Sends data/files from the guest to the host over this VFD.
1052    fn send(&mut self, rds: &[RawDescriptor], data: &mut Reader) -> WlResult<WlResp> {
1053        if let Some(socket) = &self.socket {
1054            socket
1055                .send_vectored_with_fds(&data.get_remaining(), rds)
1056                .map_err(WlError::SendVfd)?;
1057            // All remaining data in `data` is now considered consumed.
1058            data.consume(usize::MAX);
1059            Ok(WlResp::Ok)
1060        } else if let Some((_, local_pipe)) = &mut self.local_pipe {
1061            // Impossible to send descriptors over a simple pipe.
1062            if !rds.is_empty() {
1063                return Ok(WlResp::InvalidType);
1064            }
1065            data.read_to(local_pipe, usize::MAX)
1066                .map_err(WlError::WritePipe)?;
1067            Ok(WlResp::Ok)
1068        } else {
1069            Ok(WlResp::InvalidType)
1070        }
1071    }
1072
1073    // Receives data/files from the host for this VFD and queues it for the guest.
1074    fn recv(&mut self, in_file_queue: &mut Vec<File>) -> WlResult<Vec<u8>> {
1075        if let Some(socket) = self.socket.take() {
1076            let mut buf = vec![0; IN_BUFFER_LEN];
1077            // If any errors happen, the socket will get dropped, preventing more reading.
1078            let (len, descriptors) = socket
1079                .recv_with_fds(&mut buf, VIRTWL_SEND_MAX_ALLOCS)
1080                .map_err(WlError::RecvVfd)?;
1081            // If any data gets read, the put the socket back for future recv operations.
1082            if len != 0 || !descriptors.is_empty() {
1083                buf.truncate(len);
1084                buf.shrink_to_fit();
1085                self.socket = Some(socket);
1086                in_file_queue.extend(descriptors.into_iter().map(File::from));
1087                return Ok(buf);
1088            }
1089            Ok(Vec::new())
1090        } else if let Some((flags, mut local_pipe)) = self.local_pipe.take() {
1091            let mut buf = vec![0; IN_BUFFER_LEN];
1092            let len = local_pipe.read(&mut buf[..]).map_err(WlError::ReadPipe)?;
1093            if len != 0 {
1094                buf.truncate(len);
1095                buf.shrink_to_fit();
1096                self.local_pipe = Some((flags, local_pipe));
1097                return Ok(buf);
1098            }
1099            Ok(Vec::new())
1100        } else {
1101            Ok(Vec::new())
1102        }
1103    }
1104
1105    // Called after this VFD is sent over a socket to ensure the local end of the VFD receives hang
1106    // up events.
1107    fn close_remote(&mut self) {
1108        self.remote_pipe = None;
1109    }
1110
1111    fn close(&mut self) -> WlResult<()> {
1112        if let Some((offset, vm)) = self.slot.take() {
1113            vm.unregister_memory(offset)?;
1114        }
1115        self.socket = None;
1116        self.remote_pipe = None;
1117        self.local_pipe = None;
1118        Ok(())
1119    }
1120}
1121
1122impl Drop for WlVfd {
1123    fn drop(&mut self) {
1124        let _ = self.close();
1125    }
1126}
1127
1128#[derive(Debug)]
1129enum WlRecv {
1130    Vfd { id: u32 },
1131    Data { buf: Vec<u8> },
1132    Hup,
1133}
1134
1135pub struct WlState {
1136    wayland_paths: BTreeMap<String, PathBuf>,
1137    vm: VmRequester,
1138    resource_bridge: Option<Tube>,
1139    use_transition_flags: bool,
1140    wait_ctx: WaitContext<u32>,
1141    vfds: BTreeMap<u32, WlVfd>,
1142    next_vfd_id: u32,
1143    in_file_queue: Vec<File>,
1144    in_queue: VecDeque<(u32 /* vfd_id */, WlRecv)>,
1145    current_recv_vfd: Option<u32>,
1146    recv_vfds: Vec<u32>,
1147    #[cfg(feature = "gpu")]
1148    signaled_fence: Option<SafeDescriptor>,
1149    use_send_vfd_v2: bool,
1150    address_offset: Option<u64>,
1151}
1152
1153impl WlState {
1154    /// Create a new `WlState` instance for running a virtio-wl device.
1155    pub fn new(
1156        wayland_paths: BTreeMap<String, PathBuf>,
1157        mapper: Box<dyn SharedMemoryMapper>,
1158        use_transition_flags: bool,
1159        use_send_vfd_v2: bool,
1160        resource_bridge: Option<Tube>,
1161        #[cfg(feature = "gbm")] gralloc: RutabagaGralloc,
1162        address_offset: Option<u64>,
1163    ) -> WlState {
1164        WlState {
1165            wayland_paths,
1166            vm: VmRequester::new(
1167                mapper,
1168                #[cfg(feature = "gbm")]
1169                gralloc,
1170            ),
1171            resource_bridge,
1172            wait_ctx: WaitContext::new().expect("failed to create WaitContext"),
1173            use_transition_flags,
1174            vfds: BTreeMap::new(),
1175            next_vfd_id: NEXT_VFD_ID_BASE,
1176            in_file_queue: Vec::new(),
1177            in_queue: VecDeque::new(),
1178            current_recv_vfd: None,
1179            recv_vfds: Vec::new(),
1180            #[cfg(feature = "gpu")]
1181            signaled_fence: None,
1182            use_send_vfd_v2,
1183            address_offset,
1184        }
1185    }
1186
1187    /// This is a hack so that we can drive the inner WaitContext from an async fn. The proper
1188    /// long-term solution is to replace the WaitContext completely by spawning async workers
1189    /// instead.
1190    pub fn wait_ctx(&self) -> &WaitContext<u32> {
1191        &self.wait_ctx
1192    }
1193
1194    fn new_pipe(&mut self, id: u32, flags: u32) -> WlResult<WlResp> {
1195        if id & VFD_ID_HOST_MASK != 0 {
1196            return Ok(WlResp::InvalidId);
1197        }
1198
1199        if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ) != 0 {
1200            return Ok(WlResp::InvalidFlags);
1201        }
1202
1203        if flags & VIRTIO_WL_VFD_WRITE != 0 && flags & VIRTIO_WL_VFD_READ != 0 {
1204            return Ok(WlResp::InvalidFlags);
1205        }
1206
1207        match self.vfds.entry(id) {
1208            Entry::Vacant(entry) => {
1209                let vfd = if flags & VIRTIO_WL_VFD_WRITE != 0 {
1210                    WlVfd::pipe_remote_read_local_write()?
1211                } else if flags & VIRTIO_WL_VFD_READ != 0 {
1212                    WlVfd::pipe_remote_write_local_read()?
1213                } else {
1214                    return Ok(WlResp::InvalidFlags);
1215                };
1216                self.wait_ctx
1217                    .add(vfd.wait_descriptor().unwrap(), id)
1218                    .map_err(WlError::WaitContextAdd)?;
1219                let resp = WlResp::VfdNew {
1220                    id,
1221                    flags: 0,
1222                    pfn: 0,
1223                    size: 0,
1224                    resp: true,
1225                };
1226                entry.insert(vfd);
1227                Ok(resp)
1228            }
1229            Entry::Occupied(_) => Ok(WlResp::InvalidId),
1230        }
1231    }
1232
1233    fn new_alloc(&mut self, id: u32, flags: u32, size: u32) -> WlResult<WlResp> {
1234        if id & VFD_ID_HOST_MASK != 0 {
1235            return Ok(WlResp::InvalidId);
1236        }
1237
1238        if self.use_transition_flags {
1239            if flags != 0 {
1240                return Ok(WlResp::InvalidFlags);
1241            }
1242        } else if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP) != 0 {
1243            return Ok(WlResp::Err(Box::from("invalid flags")));
1244        }
1245
1246        if self.vfds.contains_key(&id) {
1247            return Ok(WlResp::InvalidId);
1248        }
1249        let vfd = WlVfd::allocate(self.vm.clone(), size as u64)?;
1250        let resp = WlResp::VfdNew {
1251            id,
1252            flags,
1253            pfn: self.compute_pfn(&vfd.offset()),
1254            size: vfd.size().unwrap_or_default() as u32,
1255            resp: true,
1256        };
1257        self.vfds.insert(id, vfd);
1258        Ok(resp)
1259    }
1260
1261    #[cfg(feature = "gbm")]
1262    fn new_dmabuf(&mut self, id: u32, width: u32, height: u32, format: u32) -> WlResult<WlResp> {
1263        if id & VFD_ID_HOST_MASK != 0 {
1264            return Ok(WlResp::InvalidId);
1265        }
1266
1267        if self.vfds.contains_key(&id) {
1268            return Ok(WlResp::InvalidId);
1269        }
1270        let (vfd, desc) = WlVfd::dmabuf(self.vm.clone(), width, height, format)?;
1271        let resp = WlResp::VfdNewDmabuf {
1272            id,
1273            flags: 0,
1274            pfn: self.compute_pfn(&vfd.offset()),
1275            size: vfd.size().unwrap_or_default() as u32,
1276            desc,
1277        };
1278        self.vfds.insert(id, vfd);
1279        Ok(resp)
1280    }
1281
1282    #[cfg(feature = "gbm")]
1283    fn dmabuf_sync(&mut self, vfd_id: u32, flags: u32) -> WlResult<WlResp> {
1284        if flags & !(VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK) != 0 {
1285            return Ok(WlResp::InvalidFlags);
1286        }
1287
1288        match self.vfds.get_mut(&vfd_id) {
1289            Some(vfd) => {
1290                vfd.dmabuf_sync(flags)?;
1291                Ok(WlResp::Ok)
1292            }
1293            None => Ok(WlResp::InvalidId),
1294        }
1295    }
1296
1297    fn new_context(&mut self, id: u32, name: &str) -> WlResult<WlResp> {
1298        if id & VFD_ID_HOST_MASK != 0 {
1299            return Ok(WlResp::InvalidId);
1300        }
1301
1302        let flags = if self.use_transition_flags {
1303            VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ
1304        } else {
1305            VIRTIO_WL_VFD_CONTROL
1306        };
1307
1308        match self.vfds.entry(id) {
1309            Entry::Vacant(entry) => {
1310                let vfd = entry.insert(WlVfd::connect(
1311                    self.wayland_paths
1312                        .get(name)
1313                        .ok_or_else(|| WlError::UnknownSocketName(name.to_string()))?,
1314                )?);
1315                self.wait_ctx
1316                    .add(vfd.wait_descriptor().unwrap(), id)
1317                    .map_err(WlError::WaitContextAdd)?;
1318                Ok(WlResp::VfdNew {
1319                    id,
1320                    flags,
1321                    pfn: 0,
1322                    size: 0,
1323                    resp: true,
1324                })
1325            }
1326            Entry::Occupied(_) => Ok(WlResp::InvalidId),
1327        }
1328    }
1329
1330    fn process_wait_context(&mut self) {
1331        let events = match self.wait_ctx.wait_timeout(Duration::from_secs(0)) {
1332            Ok(v) => v,
1333            Err(e) => {
1334                error!("failed waiting for vfd evens: {}", e);
1335                return;
1336            }
1337        };
1338
1339        for event in events.iter().filter(|e| e.is_readable) {
1340            if let Err(e) = self.recv(event.token) {
1341                error!("failed to recv from vfd: {}", e)
1342            }
1343        }
1344
1345        for event in events.iter().filter(|e| e.is_hungup) {
1346            if !event.is_readable {
1347                let vfd_id = event.token;
1348                if let Some(descriptor) =
1349                    self.vfds.get(&vfd_id).and_then(|vfd| vfd.wait_descriptor())
1350                {
1351                    if let Err(e) = self.wait_ctx.delete(descriptor) {
1352                        warn!("failed to remove hungup vfd from poll context: {}", e);
1353                    }
1354                }
1355                self.in_queue.push_back((vfd_id, WlRecv::Hup));
1356            }
1357        }
1358    }
1359
1360    fn close(&mut self, vfd_id: u32) -> WlResult<WlResp> {
1361        let mut to_delete = BTreeSet::new();
1362        for (dest_vfd_id, q) in &self.in_queue {
1363            if *dest_vfd_id == vfd_id {
1364                if let WlRecv::Vfd { id } = q {
1365                    to_delete.insert(*id);
1366                }
1367            }
1368        }
1369        for vfd_id in to_delete {
1370            // Sorry sub-error, we can't have cascading errors leaving us in an inconsistent state.
1371            let _ = self.close(vfd_id);
1372        }
1373        match self.vfds.remove(&vfd_id) {
1374            Some(mut vfd) => {
1375                self.in_queue.retain(|&(id, _)| id != vfd_id);
1376                vfd.close()?;
1377                Ok(WlResp::Ok)
1378            }
1379            None => Ok(WlResp::InvalidId),
1380        }
1381    }
1382
1383    #[cfg(feature = "gpu")]
1384    fn get_info(&mut self, request: ResourceRequest) -> Option<SafeDescriptor> {
1385        let sock = self.resource_bridge.as_ref().unwrap();
1386        match get_resource_info(sock, request) {
1387            Ok(ResourceInfo::Buffer(BufferInfo { handle, .. })) => Some(handle),
1388            Ok(ResourceInfo::Fence { handle }) => Some(handle),
1389            Err(ResourceBridgeError::InvalidResource(req)) => {
1390                warn!("attempt to send non-existent gpu resource {}", req);
1391                None
1392            }
1393            Err(e) => {
1394                error!("{}", e);
1395                // If there was an error with the resource bridge, it can no longer be
1396                // trusted to continue to function.
1397                self.resource_bridge = None;
1398                None
1399            }
1400        }
1401    }
1402
1403    fn send(
1404        &mut self,
1405        vfd_id: u32,
1406        vfd_count: usize,
1407        foreign_id: bool,
1408        reader: &mut Reader,
1409    ) -> WlResult<WlResp> {
1410        // First stage gathers and normalizes all id information from guest memory.
1411        let mut send_vfd_ids = [CtrlVfdSendVfdV2 {
1412            kind: Le32::from(0),
1413            payload: CtrlVfdSendVfdV2Payload { id: Le32::from(0) },
1414        }; VIRTWL_SEND_MAX_ALLOCS];
1415        for vfd_id in send_vfd_ids.iter_mut().take(vfd_count) {
1416            *vfd_id = if foreign_id {
1417                if self.use_send_vfd_v2 {
1418                    reader.read_obj().map_err(WlError::ParseDesc)?
1419                } else {
1420                    let vfd: CtrlVfdSendVfd = reader.read_obj().map_err(WlError::ParseDesc)?;
1421                    CtrlVfdSendVfdV2 {
1422                        kind: vfd.kind,
1423                        payload: CtrlVfdSendVfdV2Payload { id: vfd.id },
1424                    }
1425                }
1426            } else {
1427                CtrlVfdSendVfdV2 {
1428                    kind: Le32::from(VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL),
1429                    payload: CtrlVfdSendVfdV2Payload {
1430                        id: reader.read_obj().map_err(WlError::ParseDesc)?,
1431                    },
1432                }
1433            };
1434        }
1435
1436        // Next stage collects corresponding file descriptors for each id.
1437        let mut rds = [0; VIRTWL_SEND_MAX_ALLOCS];
1438        #[cfg(feature = "gpu")]
1439        let mut bridged_files = Vec::new();
1440        for (&send_vfd_id, descriptor) in send_vfd_ids[..vfd_count].iter().zip(rds.iter_mut()) {
1441            match send_vfd_id.kind.to_native() {
1442                VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL => {
1443                    match self.vfds.get(&send_vfd_id.id().to_native()) {
1444                        Some(vfd) => match vfd.send_descriptor() {
1445                            Some(vfd_fd) => *descriptor = vfd_fd,
1446                            None => return Ok(WlResp::InvalidType),
1447                        },
1448                        None => {
1449                            warn!(
1450                                "attempt to send non-existant vfd 0x{:08x}",
1451                                send_vfd_id.id().to_native()
1452                            );
1453                            return Ok(WlResp::InvalidId);
1454                        }
1455                    }
1456                }
1457                #[cfg(feature = "gpu")]
1458                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU if self.resource_bridge.is_some() => {
1459                    match self.get_info(ResourceRequest::GetBuffer {
1460                        id: send_vfd_id.id().to_native(),
1461                    }) {
1462                        Some(handle) => {
1463                            *descriptor = handle.as_raw_descriptor();
1464                            bridged_files.push(handle.into());
1465                        }
1466                        None => return Ok(WlResp::InvalidId),
1467                    }
1468                }
1469                #[cfg(feature = "gpu")]
1470                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE if self.resource_bridge.is_some() => {
1471                    match self.get_info(ResourceRequest::GetFence {
1472                        seqno: send_vfd_id.seqno().to_native(),
1473                    }) {
1474                        Some(handle) => {
1475                            *descriptor = handle.as_raw_descriptor();
1476                            bridged_files.push(handle.into());
1477                        }
1478                        None => return Ok(WlResp::InvalidId),
1479                    }
1480                }
1481                #[cfg(feature = "gpu")]
1482                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE
1483                    if self.resource_bridge.is_some() =>
1484                {
1485                    if self.signaled_fence.is_none() {
1486                        // If the guest is sending a signaled fence, we know a fence
1487                        // with seqno 0 must already be signaled.
1488                        match self.get_info(ResourceRequest::GetFence { seqno: 0 }) {
1489                            Some(handle) => self.signaled_fence = Some(handle),
1490                            None => return Ok(WlResp::InvalidId),
1491                        }
1492                    }
1493                    match self.signaled_fence.as_ref().unwrap().try_clone() {
1494                        Ok(dup) => {
1495                            *descriptor = dup.into_raw_descriptor();
1496                            // SAFETY:
1497                            // Safe because the fd comes from a valid SafeDescriptor.
1498                            let file: File = unsafe {
1499                                base::FromRawDescriptor::from_raw_descriptor(*descriptor)
1500                            };
1501                            bridged_files.push(file);
1502                        }
1503                        Err(_) => return Ok(WlResp::InvalidId),
1504                    }
1505                }
1506                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
1507                | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE
1508                | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE => {
1509                    let _ = self.resource_bridge.as_ref();
1510                    warn!("attempt to send foreign resource kind but feature is disabled");
1511                }
1512                kind => {
1513                    warn!("attempt to send unknown foreign resource kind: {}", kind);
1514                    return Ok(WlResp::InvalidId);
1515                }
1516            }
1517        }
1518
1519        // Final stage sends file descriptors and data to the target vfd's socket.
1520        match self.vfds.get_mut(&vfd_id) {
1521            Some(vfd) => match vfd.send(&rds[..vfd_count], reader)? {
1522                WlResp::Ok => {}
1523                _ => return Ok(WlResp::InvalidType),
1524            },
1525            None => return Ok(WlResp::InvalidId),
1526        }
1527        // The vfds with remote FDs need to be closed so that the local side can receive
1528        // hangup events.
1529        for &send_vfd_id in &send_vfd_ids[..vfd_count] {
1530            if send_vfd_id.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL {
1531                if let Some(vfd) = self.vfds.get_mut(&send_vfd_id.id().into()) {
1532                    vfd.close_remote();
1533                }
1534            }
1535        }
1536        Ok(WlResp::Ok)
1537    }
1538
1539    fn recv(&mut self, vfd_id: u32) -> WlResult<()> {
1540        let buf = match self.vfds.get_mut(&vfd_id) {
1541            Some(vfd) => {
1542                if vfd.is_fence {
1543                    if let Err(e) = self.wait_ctx.delete(vfd.wait_descriptor().unwrap()) {
1544                        warn!("failed to remove hungup vfd from poll context: {}", e);
1545                    }
1546                    self.in_queue.push_back((vfd_id, WlRecv::Hup));
1547                    return Ok(());
1548                } else {
1549                    vfd.recv(&mut self.in_file_queue)?
1550                }
1551            }
1552            None => return Ok(()),
1553        };
1554
1555        if self.in_file_queue.is_empty() && buf.is_empty() {
1556            self.in_queue.push_back((vfd_id, WlRecv::Hup));
1557            return Ok(());
1558        }
1559        for file in self.in_file_queue.drain(..) {
1560            let vfd = WlVfd::from_file(self.vm.clone(), file)?;
1561            if let Some(wait_descriptor) = vfd.wait_descriptor() {
1562                self.wait_ctx
1563                    .add(wait_descriptor, self.next_vfd_id)
1564                    .map_err(WlError::WaitContextAdd)?;
1565            }
1566            // Only necessary if we somehow wrap the id counter. The try_insert
1567            // API would be nicer, but that's currently experimental.
1568            while self.vfds.contains_key(&self.next_vfd_id) {
1569                self.next_vfd_id += 1;
1570            }
1571            self.vfds.insert(self.next_vfd_id, vfd);
1572            self.in_queue.push_back((
1573                vfd_id,
1574                WlRecv::Vfd {
1575                    id: self.next_vfd_id,
1576                },
1577            ));
1578            self.next_vfd_id += 1;
1579        }
1580        self.in_queue.push_back((vfd_id, WlRecv::Data { buf }));
1581
1582        Ok(())
1583    }
1584
1585    fn execute(&mut self, reader: &mut Reader) -> WlResult<WlResp> {
1586        let type_: Le32 = reader.peek_obj::<Le32>().map_err(WlError::ParseDesc)?;
1587        match type_.into() {
1588            VIRTIO_WL_CMD_VFD_NEW => {
1589                let ctrl = reader
1590                    .read_obj::<CtrlVfdNew>()
1591                    .map_err(WlError::ParseDesc)?;
1592                self.new_alloc(ctrl.id.into(), ctrl.flags.into(), ctrl.size.into())
1593            }
1594            VIRTIO_WL_CMD_VFD_CLOSE => {
1595                let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
1596                self.close(ctrl.id.into())
1597            }
1598            VIRTIO_WL_CMD_VFD_SEND => {
1599                let ctrl = reader
1600                    .read_obj::<CtrlVfdSend>()
1601                    .map_err(WlError::ParseDesc)?;
1602                let foreign_id = false;
1603                self.send(
1604                    ctrl.id.into(),
1605                    ctrl.vfd_count.to_native() as usize,
1606                    foreign_id,
1607                    reader,
1608                )
1609            }
1610            #[cfg(feature = "gpu")]
1611            VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID => {
1612                let ctrl = reader
1613                    .read_obj::<CtrlVfdSend>()
1614                    .map_err(WlError::ParseDesc)?;
1615                let foreign_id = true;
1616                self.send(
1617                    ctrl.id.into(),
1618                    ctrl.vfd_count.to_native() as usize,
1619                    foreign_id,
1620                    reader,
1621                )
1622            }
1623            VIRTIO_WL_CMD_VFD_NEW_CTX => {
1624                let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
1625                self.new_context(ctrl.id.into(), "")
1626            }
1627            VIRTIO_WL_CMD_VFD_NEW_PIPE => {
1628                let ctrl = reader
1629                    .read_obj::<CtrlVfdNew>()
1630                    .map_err(WlError::ParseDesc)?;
1631                self.new_pipe(ctrl.id.into(), ctrl.flags.into())
1632            }
1633            #[cfg(feature = "gbm")]
1634            VIRTIO_WL_CMD_VFD_NEW_DMABUF => {
1635                let ctrl = reader
1636                    .read_obj::<CtrlVfdNewDmabuf>()
1637                    .map_err(WlError::ParseDesc)?;
1638                self.new_dmabuf(
1639                    ctrl.id.into(),
1640                    ctrl.width.into(),
1641                    ctrl.height.into(),
1642                    ctrl.format.into(),
1643                )
1644            }
1645            #[cfg(feature = "gbm")]
1646            VIRTIO_WL_CMD_VFD_DMABUF_SYNC => {
1647                let ctrl = reader
1648                    .read_obj::<CtrlVfdDmabufSync>()
1649                    .map_err(WlError::ParseDesc)?;
1650                self.dmabuf_sync(ctrl.id.into(), ctrl.flags.into())
1651            }
1652            VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED => {
1653                let ctrl = reader
1654                    .read_obj::<CtrlVfdNewCtxNamed>()
1655                    .map_err(WlError::ParseDesc)?;
1656                let name_len = ctrl
1657                    .name
1658                    .iter()
1659                    .position(|x| x == &0)
1660                    .unwrap_or(ctrl.name.len());
1661                let name =
1662                    std::str::from_utf8(&ctrl.name[..name_len]).map_err(WlError::InvalidString)?;
1663                self.new_context(ctrl.id.into(), name)
1664            }
1665            op_type => {
1666                warn!("unexpected command {}", op_type);
1667                Ok(WlResp::InvalidCommand)
1668            }
1669        }
1670    }
1671
1672    fn next_recv(&self) -> Option<WlResp> {
1673        if let Some(q) = self.in_queue.front() {
1674            match *q {
1675                (vfd_id, WlRecv::Vfd { id }) => {
1676                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1677                        match self.vfds.get(&id) {
1678                            Some(vfd) => Some(WlResp::VfdNew {
1679                                id,
1680                                flags: vfd.flags(self.use_transition_flags),
1681                                pfn: self.compute_pfn(&vfd.offset()),
1682                                size: vfd.size().unwrap_or_default() as u32,
1683                                resp: false,
1684                            }),
1685                            _ => Some(WlResp::VfdNew {
1686                                id,
1687                                flags: 0,
1688                                pfn: 0,
1689                                size: 0,
1690                                resp: false,
1691                            }),
1692                        }
1693                    } else {
1694                        Some(WlResp::VfdRecv {
1695                            id: self.current_recv_vfd.unwrap(),
1696                            data: &[],
1697                            vfds: &self.recv_vfds[..],
1698                        })
1699                    }
1700                }
1701                (vfd_id, WlRecv::Data { ref buf }) => {
1702                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1703                        Some(WlResp::VfdRecv {
1704                            id: vfd_id,
1705                            data: &buf[..],
1706                            vfds: &self.recv_vfds[..],
1707                        })
1708                    } else {
1709                        Some(WlResp::VfdRecv {
1710                            id: self.current_recv_vfd.unwrap(),
1711                            data: &[],
1712                            vfds: &self.recv_vfds[..],
1713                        })
1714                    }
1715                }
1716                (vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
1717            }
1718        } else {
1719            None
1720        }
1721    }
1722
1723    fn pop_recv(&mut self) {
1724        if let Some(q) = self.in_queue.front() {
1725            match *q {
1726                (vfd_id, WlRecv::Vfd { id }) => {
1727                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1728                        self.recv_vfds.push(id);
1729                        self.current_recv_vfd = Some(vfd_id);
1730                    } else {
1731                        self.recv_vfds.clear();
1732                        self.current_recv_vfd = None;
1733                        return;
1734                    }
1735                }
1736                (vfd_id, WlRecv::Data { .. }) => {
1737                    self.recv_vfds.clear();
1738                    self.current_recv_vfd = None;
1739                    if !(self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id)) {
1740                        return;
1741                    }
1742                }
1743                (_, WlRecv::Hup) => {
1744                    self.recv_vfds.clear();
1745                    self.current_recv_vfd = None;
1746                }
1747            }
1748        }
1749        self.in_queue.pop_front();
1750    }
1751
1752    fn compute_pfn(&self, offset: &Option<u64>) -> u64 {
1753        let addr = match (offset, self.address_offset) {
1754            (Some(o), Some(address_offset)) => o + address_offset,
1755            (Some(o), None) => *o,
1756            // without shmem, 0 is the special address for "no_pfn"
1757            (None, Some(_)) => 0,
1758            // with shmem, WL_SHMEM_SIZE is the special address for "no_pfn"
1759            (None, None) => WL_SHMEM_SIZE,
1760        };
1761        addr >> VIRTIO_WL_PFN_SHIFT
1762    }
1763}
1764
1765#[derive(ThisError, Debug, PartialEq, Eq)]
1766#[error("no descriptors available in queue")]
1767pub struct DescriptorsExhausted;
1768
1769/// Handle incoming events and forward them to the VM over the input queue.
1770pub fn process_in_queue(
1771    in_queue: &mut Queue,
1772    state: &mut WlState,
1773) -> ::std::result::Result<(), DescriptorsExhausted> {
1774    state.process_wait_context();
1775
1776    let mut needs_interrupt = false;
1777    let mut exhausted_queue = false;
1778    loop {
1779        let mut desc = if let Some(d) = in_queue.peek() {
1780            d
1781        } else {
1782            exhausted_queue = true;
1783            break;
1784        };
1785
1786        let mut should_pop = false;
1787        if let Some(in_resp) = state.next_recv() {
1788            match encode_resp(&mut desc.writer, in_resp) {
1789                Ok(()) => {
1790                    should_pop = true;
1791                }
1792                Err(e) => {
1793                    error!("failed to encode response to descriptor chain: {}", e);
1794                }
1795            }
1796            needs_interrupt = true;
1797            let desc = desc.pop();
1798            in_queue.add_used(desc);
1799        } else {
1800            break;
1801        }
1802        if should_pop {
1803            state.pop_recv();
1804        }
1805    }
1806
1807    if needs_interrupt {
1808        in_queue.trigger_interrupt();
1809    }
1810
1811    if exhausted_queue {
1812        Err(DescriptorsExhausted)
1813    } else {
1814        Ok(())
1815    }
1816}
1817
1818/// Handle messages from the output queue and forward them to the display sever, if necessary.
1819pub fn process_out_queue(out_queue: &mut Queue, state: &mut WlState) {
1820    let mut needs_interrupt = false;
1821    while let Some(mut desc) = out_queue.pop() {
1822        let resp = match state.execute(&mut desc.reader) {
1823            Ok(r) => r,
1824            Err(e) => WlResp::Err(Box::new(e)),
1825        };
1826
1827        match encode_resp(&mut desc.writer, resp) {
1828            Ok(()) => {}
1829            Err(e) => {
1830                error!("failed to encode response to descriptor chain: {}", e);
1831            }
1832        }
1833
1834        out_queue.add_used(desc);
1835        needs_interrupt = true;
1836    }
1837
1838    if needs_interrupt {
1839        out_queue.trigger_interrupt();
1840    }
1841}
1842
1843struct Worker {
1844    in_queue: Queue,
1845    out_queue: Queue,
1846    state: WlState,
1847}
1848
1849impl Worker {
1850    fn new(
1851        in_queue: Queue,
1852        out_queue: Queue,
1853        wayland_paths: BTreeMap<String, PathBuf>,
1854        mapper: Box<dyn SharedMemoryMapper>,
1855        use_transition_flags: bool,
1856        use_send_vfd_v2: bool,
1857        resource_bridge: Option<Tube>,
1858        #[cfg(feature = "gbm")] gralloc: RutabagaGralloc,
1859        address_offset: Option<u64>,
1860    ) -> Worker {
1861        Worker {
1862            in_queue,
1863            out_queue,
1864            state: WlState::new(
1865                wayland_paths,
1866                mapper,
1867                use_transition_flags,
1868                use_send_vfd_v2,
1869                resource_bridge,
1870                #[cfg(feature = "gbm")]
1871                gralloc,
1872                address_offset,
1873            ),
1874        }
1875    }
1876
1877    fn run(&mut self, kill_evt: Event) -> anyhow::Result<()> {
1878        #[derive(EventToken)]
1879        enum Token {
1880            InQueue,
1881            OutQueue,
1882            Kill,
1883            State,
1884        }
1885
1886        let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
1887            (self.in_queue.event(), Token::InQueue),
1888            (self.out_queue.event(), Token::OutQueue),
1889            (&kill_evt, Token::Kill),
1890            (&self.state.wait_ctx, Token::State),
1891        ])
1892        .context("failed creating WaitContext")?;
1893
1894        let mut watching_state_ctx = true;
1895        'wait: loop {
1896            let events = match wait_ctx.wait() {
1897                Ok(v) => v,
1898                Err(e) => {
1899                    error!("failed waiting for events: {}", e);
1900                    break;
1901                }
1902            };
1903
1904            for event in &events {
1905                match event.token {
1906                    Token::InQueue => {
1907                        let _ = self.in_queue.event().wait();
1908                        if !watching_state_ctx {
1909                            if let Err(e) =
1910                                wait_ctx.modify(&self.state.wait_ctx, EventType::Read, Token::State)
1911                            {
1912                                error!("Failed to modify wait_ctx descriptor for WlState: {}", e);
1913                                break;
1914                            }
1915                            watching_state_ctx = true;
1916                        }
1917                    }
1918                    Token::OutQueue => {
1919                        let _ = self.out_queue.event().wait();
1920                        process_out_queue(&mut self.out_queue, &mut self.state);
1921                    }
1922                    Token::Kill => break 'wait,
1923                    Token::State => {
1924                        if let Err(DescriptorsExhausted) =
1925                            process_in_queue(&mut self.in_queue, &mut self.state)
1926                        {
1927                            if let Err(e) =
1928                                wait_ctx.modify(&self.state.wait_ctx, EventType::None, Token::State)
1929                            {
1930                                error!(
1931                                    "Failed to stop watching wait_ctx descriptor for WlState: {}",
1932                                    e
1933                                );
1934                                break;
1935                            }
1936                            watching_state_ctx = false;
1937                        }
1938                    }
1939                }
1940            }
1941        }
1942
1943        Ok(())
1944    }
1945}
1946
1947pub struct Wl {
1948    worker_thread: Option<WorkerThread<BTreeMap<usize, Queue>>>,
1949    wayland_paths: BTreeMap<String, PathBuf>,
1950    mapper: Option<Box<dyn SharedMemoryMapper>>,
1951    resource_bridge: Option<Tube>,
1952    base_features: u64,
1953    acked_features: u64,
1954    #[cfg(feature = "gbm")]
1955    gralloc: Option<RutabagaGralloc>,
1956    address_offset: Option<u64>,
1957}
1958
1959impl Wl {
1960    pub fn new(
1961        base_features: u64,
1962        wayland_paths: BTreeMap<String, PathBuf>,
1963        resource_bridge: Option<Tube>,
1964    ) -> Result<Wl> {
1965        Ok(Wl {
1966            worker_thread: None,
1967            wayland_paths,
1968            mapper: None,
1969            resource_bridge,
1970            base_features,
1971            acked_features: 0,
1972            #[cfg(feature = "gbm")]
1973            gralloc: None,
1974            address_offset: None,
1975        })
1976    }
1977}
1978
1979impl VirtioDevice for Wl {
1980    fn keep_rds(&self) -> Vec<RawDescriptor> {
1981        let mut keep_rds = Vec::new();
1982
1983        if let Some(mapper) = &self.mapper {
1984            if let Some(raw_descriptor) = mapper.as_raw_descriptor() {
1985                keep_rds.push(raw_descriptor);
1986            }
1987        }
1988        if let Some(resource_bridge) = &self.resource_bridge {
1989            keep_rds.push(resource_bridge.as_raw_descriptor());
1990        }
1991        keep_rds
1992    }
1993
1994    #[cfg(feature = "gbm")]
1995    fn on_device_sandboxed(&mut self) {
1996        // Gralloc initialization can cause some GPU drivers to create their own threads
1997        // and that must be done after sandboxing.
1998        match RutabagaGralloc::new(RutabagaGrallocBackendFlags::new()) {
1999            Ok(g) => self.gralloc = Some(g),
2000            Err(e) => {
2001                error!("failed to initialize gralloc {:?}", e);
2002            }
2003        };
2004    }
2005
2006    fn device_type(&self) -> DeviceType {
2007        DeviceType::Wl
2008    }
2009
2010    fn queue_max_sizes(&self) -> &[u16] {
2011        QUEUE_SIZES
2012    }
2013
2014    fn features(&self) -> u64 {
2015        self.base_features
2016            | 1 << VIRTIO_WL_F_TRANS_FLAGS
2017            | 1 << VIRTIO_WL_F_SEND_FENCES
2018            | 1 << VIRTIO_WL_F_USE_SHMEM
2019    }
2020
2021    fn ack_features(&mut self, value: u64) {
2022        self.acked_features |= value;
2023    }
2024
2025    fn activate(
2026        &mut self,
2027        _mem: GuestMemory,
2028        _interrupt: Interrupt,
2029        mut queues: BTreeMap<usize, Queue>,
2030    ) -> anyhow::Result<()> {
2031        if queues.len() != QUEUE_SIZES.len() {
2032            return Err(anyhow!(
2033                "expected {} queues, got {}",
2034                QUEUE_SIZES.len(),
2035                queues.len()
2036            ));
2037        }
2038
2039        let mapper = self.mapper.take().context("missing mapper")?;
2040
2041        let wayland_paths = self.wayland_paths.clone();
2042        let use_transition_flags = self.acked_features & (1 << VIRTIO_WL_F_TRANS_FLAGS) != 0;
2043        let use_send_vfd_v2 = self.acked_features & (1 << VIRTIO_WL_F_SEND_FENCES) != 0;
2044        let use_shmem = self.acked_features & (1 << VIRTIO_WL_F_USE_SHMEM) != 0;
2045        let resource_bridge = self.resource_bridge.take();
2046        #[cfg(feature = "gbm")]
2047        let gralloc = self
2048            .gralloc
2049            .take()
2050            .expect("gralloc already passed to worker");
2051        let address_offset = if !use_shmem {
2052            self.address_offset
2053        } else {
2054            None
2055        };
2056
2057        self.worker_thread = Some(WorkerThread::start("v_wl", move |kill_evt| {
2058            let mut worker = Worker::new(
2059                queues.pop_first().unwrap().1,
2060                queues.pop_first().unwrap().1,
2061                wayland_paths,
2062                mapper,
2063                use_transition_flags,
2064                use_send_vfd_v2,
2065                resource_bridge,
2066                #[cfg(feature = "gbm")]
2067                gralloc,
2068                address_offset,
2069            );
2070            if let Err(e) = worker.run(kill_evt) {
2071                error!("wl worker failed: {e:#}");
2072            }
2073            BTreeMap::from_iter([worker.in_queue, worker.out_queue].into_iter().enumerate())
2074        }));
2075
2076        Ok(())
2077    }
2078
2079    fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
2080        Some(SharedMemoryRegion {
2081            id: WL_SHMEM_ID,
2082            length: WL_SHMEM_SIZE,
2083        })
2084    }
2085
2086    fn set_shared_memory_region(&mut self, shmem_region: AddressRange) {
2087        self.address_offset = Some(shmem_region.start);
2088    }
2089
2090    fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
2091        self.mapper = Some(mapper);
2092    }
2093
2094    fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
2095        if let Some(worker_thread) = self.worker_thread.take() {
2096            let queues = worker_thread.stop();
2097            return Ok(Some(queues));
2098        }
2099        Ok(None)
2100    }
2101
2102    fn virtio_wake(
2103        &mut self,
2104        device_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
2105    ) -> anyhow::Result<()> {
2106        match device_state {
2107            None => Ok(()),
2108            Some((mem, interrupt, queues)) => {
2109                // TODO: activate is just what we want at the moment, but we should probably move
2110                // it into a "start workers" function to make it obvious that it isn't strictly
2111                // used for activate events.
2112                self.activate(mem, interrupt, queues)?;
2113                Ok(())
2114            }
2115        }
2116    }
2117}