use std::cell::RefCell;
use std::collections::btree_map::Entry;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::collections::VecDeque;
use std::convert::From;
use std::error::Error as StdError;
use std::fmt;
use std::fs::File;
use std::io;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::io::Write;
use std::mem::size_of;
#[cfg(feature = "minigbm")]
use std::os::raw::c_uint;
#[cfg(feature = "minigbm")]
use std::os::raw::c_ulonglong;
use std::os::unix::net::UnixStream;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use std::result;
use std::time::Duration;
use anyhow::anyhow;
use anyhow::Context;
use base::error;
#[cfg(feature = "minigbm")]
use base::ioctl_iow_nr;
use base::ioctl_iowr_nr;
use base::ioctl_with_ref;
use base::linux::SharedMemoryLinux;
use base::pagesize;
use base::pipe;
use base::round_up_to_page_size;
use base::unix::FileFlags;
use base::warn;
use base::AsRawDescriptor;
use base::Error;
use base::Event;
use base::EventToken;
use base::EventType;
#[cfg(feature = "gpu")]
use base::IntoRawDescriptor;
#[cfg(feature = "minigbm")]
use base::MemoryMappingBuilder;
#[cfg(feature = "minigbm")]
use base::MmapError;
use base::Protection;
use base::RawDescriptor;
use base::Result;
use base::SafeDescriptor;
use base::ScmSocket;
use base::SharedMemory;
use base::Tube;
use base::TubeError;
use base::VolatileMemoryError;
use base::WaitContext;
use base::WorkerThread;
use data_model::Le32;
use data_model::Le64;
use hypervisor::MemCacheType;
#[cfg(feature = "minigbm")]
use libc::EBADF;
#[cfg(feature = "minigbm")]
use libc::EINVAL;
#[cfg(feature = "minigbm")]
use libc::ENOSYS;
use remain::sorted;
use resources::address_allocator::AddressAllocator;
use resources::AddressRange;
use resources::Alloc;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::DrmFormat;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::ImageAllocationInfo;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::ImageMemoryRequirements;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaDescriptor;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaError;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaGralloc;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaGrallocBackendFlags;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaGrallocFlags;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RutabagaIntoRawDescriptor;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RUTABAGA_MAP_CACHE_CACHED;
#[cfg(feature = "minigbm")]
use rutabaga_gfx::RUTABAGA_MAP_CACHE_MASK;
use thiserror::Error as ThisError;
use vm_control::VmMemorySource;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use vm_memory::GuestMemoryError;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::FromZeroes;
#[cfg(feature = "gpu")]
use super::resource_bridge::get_resource_info;
#[cfg(feature = "gpu")]
use super::resource_bridge::BufferInfo;
#[cfg(feature = "gpu")]
use super::resource_bridge::ResourceBridgeError;
#[cfg(feature = "gpu")]
use super::resource_bridge::ResourceInfo;
#[cfg(feature = "gpu")]
use super::resource_bridge::ResourceRequest;
use super::DeviceType;
use super::Interrupt;
use super::Queue;
use super::Reader;
use super::SharedMemoryMapper;
use super::SharedMemoryRegion;
use super::VirtioDevice;
use super::Writer;
use crate::virtio::device_constants::wl::VIRTIO_WL_F_SEND_FENCES;
use crate::virtio::device_constants::wl::VIRTIO_WL_F_TRANS_FLAGS;
use crate::virtio::device_constants::wl::VIRTIO_WL_F_USE_SHMEM;
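// The virtio-wl protocol runs over two virtqueues: an "out" queue carrying guest commands to
// the host (see `process_out_queue`) and an "in" queue carrying messages back to the guest
// (see `process_in_queue`). Every message begins with a `CtrlHeader` whose type is one of the
// VIRTIO_WL_CMD_* or VIRTIO_WL_RESP_* codes below.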
const QUEUE_SIZE: u16 = 256;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];
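// Maximum number of vfd attachments a single SEND or RECV message may carry.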
const VIRTWL_SEND_MAX_ALLOCS: usize = 28;
const VIRTIO_WL_CMD_VFD_NEW: u32 = 256;
const VIRTIO_WL_CMD_VFD_CLOSE: u32 = 257;
const VIRTIO_WL_CMD_VFD_SEND: u32 = 258;
const VIRTIO_WL_CMD_VFD_RECV: u32 = 259;
const VIRTIO_WL_CMD_VFD_NEW_CTX: u32 = 260;
const VIRTIO_WL_CMD_VFD_NEW_PIPE: u32 = 261;
const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_CMD_VFD_DMABUF_SYNC: u32 = 264;
#[cfg(feature = "gpu")]
const VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: u32 = 265;
const VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED: u32 = 266;
const VIRTIO_WL_RESP_OK: u32 = 4096;
const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_RESP_VFD_NEW_DMABUF: u32 = 4098;
const VIRTIO_WL_RESP_ERR: u32 = 4352;
const VIRTIO_WL_RESP_OUT_OF_MEMORY: u32 = 4353;
const VIRTIO_WL_RESP_INVALID_ID: u32 = 4354;
const VIRTIO_WL_RESP_INVALID_TYPE: u32 = 4355;
const VIRTIO_WL_RESP_INVALID_FLAGS: u32 = 4356;
const VIRTIO_WL_RESP_INVALID_CMD: u32 = 4357;
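// vfd flag bits. When VIRTIO_WL_F_TRANS_FLAGS is negotiated, the WRITE/READ/FENCE bits are
// reported; otherwise the legacy MAP/CONTROL bits are used (note MAP shares the value of
// READ). See `WlVfd::flags`.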
const VIRTIO_WL_VFD_WRITE: u32 = 0x1;
const VIRTIO_WL_VFD_READ: u32 = 0x2;
const VIRTIO_WL_VFD_MAP: u32 = 0x2;
const VIRTIO_WL_VFD_CONTROL: u32 = 0x4;
const VIRTIO_WL_VFD_FENCE: u32 = 0x8;
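// The vfd id space is split between guest and host: guest-allocated ids must not have the
// VFD_ID_HOST_MASK bit set, while ids for host-created vfds count upward from
// NEXT_VFD_ID_BASE.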
const NEXT_VFD_ID_BASE: u32 = 0x40000000;
const VFD_ID_HOST_MASK: u32 = NEXT_VFD_ID_BASE;
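// Size the recv data buffer so that a maximal CtrlVfdRecv message (header, vfd id table, and
// payload) fits in a single 0x1000-byte buffer.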
const IN_BUFFER_LEN: usize =
0x1000 - size_of::<CtrlVfdRecv>() - VIRTWL_SEND_MAX_ALLOCS * size_of::<Le32>();
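// DMA-BUF sync flag mask and ioctl constants mirroring <linux/dma-buf.h>, used by
// `WlVfd::dmabuf_sync` to bracket CPU access to dmabuf-backed vfds.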
#[cfg(feature = "minigbm")]
const VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK: u32 = 0x7;
#[cfg(feature = "minigbm")]
const DMA_BUF_IOCTL_BASE: c_uint = 0x62;
#[cfg(feature = "minigbm")]
const DMA_BUF_SYNC_WRITE: c_uint = 0x2;
#[cfg(feature = "minigbm")]
const DMA_BUF_SYNC_END: c_uint = 0x4;
#[cfg(feature = "minigbm")]
#[repr(C)]
#[derive(Copy, Clone)]
struct dma_buf_sync {
flags: c_ulonglong,
}
#[cfg(feature = "minigbm")]
ioctl_iow_nr!(DMA_BUF_IOCTL_SYNC, DMA_BUF_IOCTL_BASE, 0, dma_buf_sync);
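// Mirror of the kernel's `struct sync_file_info` (<linux/sync_file.h>); `is_fence` issues
// SYNC_IOC_FILE_INFO against a descriptor to detect whether it is a sync fence.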
#[repr(C)]
#[derive(Copy, Clone, Default)]
struct sync_file_info {
name: [u8; 32],
status: i32,
flags: u32,
num_fences: u32,
pad: u32,
sync_fence_info: u64,
}
ioctl_iowr_nr!(SYNC_IOC_FILE_INFO, 0x3e, 4, sync_file_info);
/// Returns true if `f` is a sync file (fence), determined by whether the SYNC_IOC_FILE_INFO
/// ioctl succeeds on it.
fn is_fence(f: &File) -> bool {
    let info = sync_file_info::default();
    // SAFETY: `f` is a valid open file and `info` is a correctly sized, initialized struct
    // for this ioctl; only the return code is used.
    unsafe { ioctl_with_ref(f, SYNC_IOC_FILE_INFO, &info) == 0 }
}
#[cfg(feature = "minigbm")]
#[derive(Debug, Default)]
struct GpuMemoryPlaneDesc {
stride: u32,
offset: u32,
}
#[cfg(feature = "minigbm")]
#[derive(Debug, Default)]
struct GpuMemoryDesc {
planes: [GpuMemoryPlaneDesc; 3],
}
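// Kinds of vfd references that may appear in a SEND message: a local vfd id, a virtgpu
// resource id, a virtgpu fence sequence number, or a fence that is already signaled.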
const VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL: u32 = 0;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU: u32 = 1;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE: u32 = 2;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE: u32 = 3;
const VIRTIO_WL_PFN_SHIFT: u32 = 12;
fn encode_vfd_new(
writer: &mut Writer,
resp: bool,
vfd_id: u32,
flags: u32,
pfn: u64,
size: u32,
) -> WlResult<()> {
let ctrl_vfd_new = CtrlVfdNew {
hdr: CtrlHeader {
type_: Le32::from(if resp {
VIRTIO_WL_RESP_VFD_NEW
} else {
VIRTIO_WL_CMD_VFD_NEW
}),
flags: Le32::from(0),
},
id: Le32::from(vfd_id),
flags: Le32::from(flags),
pfn: Le64::from(pfn),
size: Le32::from(size),
padding: Default::default(),
};
writer
.write_obj(ctrl_vfd_new)
.map_err(WlError::WriteResponse)
}
#[cfg(feature = "minigbm")]
fn encode_vfd_new_dmabuf(
writer: &mut Writer,
vfd_id: u32,
flags: u32,
pfn: u64,
size: u32,
desc: GpuMemoryDesc,
) -> WlResult<()> {
let ctrl_vfd_new_dmabuf = CtrlVfdNewDmabuf {
hdr: CtrlHeader {
type_: Le32::from(VIRTIO_WL_RESP_VFD_NEW_DMABUF),
flags: Le32::from(0),
},
id: Le32::from(vfd_id),
flags: Le32::from(flags),
pfn: Le64::from(pfn),
size: Le32::from(size),
width: Le32::from(0),
height: Le32::from(0),
format: Le32::from(0),
stride0: Le32::from(desc.planes[0].stride),
stride1: Le32::from(desc.planes[1].stride),
stride2: Le32::from(desc.planes[2].stride),
offset0: Le32::from(desc.planes[0].offset),
offset1: Le32::from(desc.planes[1].offset),
offset2: Le32::from(desc.planes[2].offset),
};
writer
.write_obj(ctrl_vfd_new_dmabuf)
.map_err(WlError::WriteResponse)
}
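// Writes a RECV message: the CtrlVfdRecv header, then one little-endian id per received vfd,
// then the raw data bytes.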
fn encode_vfd_recv(writer: &mut Writer, vfd_id: u32, data: &[u8], vfd_ids: &[u32]) -> WlResult<()> {
let ctrl_vfd_recv = CtrlVfdRecv {
hdr: CtrlHeader {
type_: Le32::from(VIRTIO_WL_CMD_VFD_RECV),
flags: Le32::from(0),
},
id: Le32::from(vfd_id),
vfd_count: Le32::from(vfd_ids.len() as u32),
};
writer
.write_obj(ctrl_vfd_recv)
.map_err(WlError::WriteResponse)?;
for &recv_vfd_id in vfd_ids.iter() {
writer
.write_obj(Le32::from(recv_vfd_id))
.map_err(WlError::WriteResponse)?;
}
writer.write_all(data).map_err(WlError::WriteResponse)
}
fn encode_vfd_hup(writer: &mut Writer, vfd_id: u32) -> WlResult<()> {
let ctrl_vfd_new = CtrlVfd {
hdr: CtrlHeader {
type_: Le32::from(VIRTIO_WL_CMD_VFD_HUP),
flags: Le32::from(0),
},
id: Le32::from(vfd_id),
};
writer
.write_obj(ctrl_vfd_new)
.map_err(WlError::WriteResponse)
}
fn encode_resp(writer: &mut Writer, resp: WlResp) -> WlResult<()> {
match resp {
WlResp::VfdNew {
id,
flags,
pfn,
size,
resp,
} => encode_vfd_new(writer, resp, id, flags, pfn, size),
#[cfg(feature = "minigbm")]
WlResp::VfdNewDmabuf {
id,
flags,
pfn,
size,
desc,
} => encode_vfd_new_dmabuf(writer, id, flags, pfn, size, desc),
WlResp::VfdRecv { id, data, vfds } => encode_vfd_recv(writer, id, data, vfds),
WlResp::VfdHup { id } => encode_vfd_hup(writer, id),
r => writer
.write_obj(Le32::from(r.get_code()))
.map_err(WlError::WriteResponse),
}
}
#[allow(dead_code)]
#[sorted]
#[derive(ThisError, Debug)]
enum WlError {
#[error("overflow in calculation")]
CheckedOffset,
#[error("failed to synchronize DMABuf access: {0}")]
DmabufSync(io::Error),
#[error("failed to create shared memory from descriptor: {0}")]
FromSharedMemory(Error),
#[error("failed to get seals: {0}")]
GetSeals(Error),
#[error("gralloc error: {0}")]
#[cfg(feature = "minigbm")]
GrallocError(#[from] RutabagaError),
#[error("access violation in guest memory: {0}")]
GuestMemory(#[from] GuestMemoryError),
#[error("invalid string: {0}")]
InvalidString(std::str::Utf8Error),
#[error("failed to create shared memory allocation: {0}")]
NewAlloc(Error),
#[error("failed to create pipe: {0}")]
NewPipe(Error),
#[error("error parsing descriptor: {0}")]
ParseDesc(io::Error),
#[error("failed to read a pipe: {0}")]
ReadPipe(io::Error),
#[error("failed to recv on a socket: {0}")]
RecvVfd(io::Error),
#[error("failed to send on a socket: {0}")]
SendVfd(io::Error),
#[error("shmem mapper failure: {0}")]
ShmemMapperError(anyhow::Error),
#[error("failed to connect socket: {0}")]
SocketConnect(io::Error),
#[error("failed to set socket as non-blocking: {0}")]
SocketNonBlock(io::Error),
#[error("unknown socket name: {0}")]
UnknownSocketName(String),
#[error("invalid response from parent VM")]
VmBadResponse,
#[error("failed to control parent VM: {0}")]
VmControl(TubeError),
#[error("access violating in guest volatile memory: {0}")]
VolatileMemory(#[from] VolatileMemoryError),
#[error("failed to listen to descriptor on wait context: {0}")]
WaitContextAdd(Error),
#[error("failed to write to a pipe: {0}")]
WritePipe(io::Error),
#[error("failed to write response: {0}")]
WriteResponse(io::Error),
}
type WlResult<T> = result::Result<T, WlError>;
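// Id and size of the shared memory region through which host allocations are exposed to the
// guest (see `get_shared_memory_region`).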
pub const WL_SHMEM_ID: u8 = 0;
pub const WL_SHMEM_SIZE: u64 = 1 << 32;
struct VmRequesterState {
mapper: Box<dyn SharedMemoryMapper>,
#[cfg(feature = "minigbm")]
gralloc: RutabagaGralloc,
address_allocator: AddressAllocator,
    allocs: BTreeMap<u64 /* offset */, Alloc>,
next_alloc: usize,
}
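/// Cheaply cloneable handle to the state shared with the VM: the shared memory mapper, the
/// gralloc instance (with the minigbm feature), and the address allocator that hands out
/// offsets within the WL_SHMEM region.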
#[derive(Clone)]
struct VmRequester {
state: Rc<RefCell<VmRequesterState>>,
}
#[cfg(feature = "minigbm")]
fn to_safe_descriptor(r: RutabagaDescriptor) -> SafeDescriptor {
    // SAFETY: `into_raw_descriptor()` transfers ownership of a valid descriptor, which
    // `from_raw_descriptor()` then assumes.
    unsafe { base::FromRawDescriptor::from_raw_descriptor(r.into_raw_descriptor()) }
}
impl VmRequester {
fn new(
mapper: Box<dyn SharedMemoryMapper>,
#[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
) -> VmRequester {
VmRequester {
state: Rc::new(RefCell::new(VmRequesterState {
mapper,
#[cfg(feature = "minigbm")]
gralloc,
address_allocator: AddressAllocator::new(
AddressRange::from_start_and_size(0, WL_SHMEM_SIZE).unwrap(),
Some(pagesize() as u64),
None,
)
.expect("failed to create allocator"),
allocs: BTreeMap::new(),
next_alloc: 0,
})),
}
}
fn unregister_memory(&self, offset: u64) -> WlResult<()> {
let mut state = self.state.borrow_mut();
state
.mapper
.remove_mapping(offset)
.map_err(WlError::ShmemMapperError)?;
let alloc = state
.allocs
.remove(&offset)
.context("unknown offset")
.map_err(WlError::ShmemMapperError)?;
state
.address_allocator
.release(alloc)
.expect("corrupt address space");
Ok(())
}
#[cfg(feature = "minigbm")]
fn allocate_and_register_gpu_memory(
&self,
width: u32,
height: u32,
format: u32,
) -> WlResult<(u64, SafeDescriptor, ImageMemoryRequirements)> {
let mut state = self.state.borrow_mut();
let img = ImageAllocationInfo {
width,
height,
drm_format: DrmFormat::from(format),
flags: RutabagaGrallocFlags::empty().use_linear(true),
};
let reqs = state
.gralloc
.get_image_memory_requirements(img)
.map_err(WlError::GrallocError)?;
let handle = state
.gralloc
.allocate_memory(reqs)
.map_err(WlError::GrallocError)?;
drop(state);
let safe_descriptor = to_safe_descriptor(handle.os_handle);
self.register_memory(
safe_descriptor
.try_clone()
.context("failed to dup gfx handle")
.map_err(WlError::ShmemMapperError)?,
reqs.size,
Protection::read_write(),
)
.map(|info| (info, safe_descriptor, reqs))
}
fn register_shmem(&self, shm: &SharedMemory) -> WlResult<u64> {
let prot = match FileFlags::from_file(shm) {
Ok(FileFlags::Read) => Protection::read(),
Ok(FileFlags::Write) => Protection::write(),
Ok(FileFlags::ReadWrite) => {
let seals = shm.get_seals().map_err(WlError::GetSeals)?;
if seals.write_seal() {
Protection::read()
} else {
Protection::read_write()
}
}
Err(e) => {
return Err(WlError::ShmemMapperError(anyhow!(
"failed to get file descriptor flags with error: {:?}",
e
)))
}
};
self.register_memory(
SafeDescriptor::try_from(shm as &dyn AsRawDescriptor)
.context("failed to create safe descriptor")
.map_err(WlError::ShmemMapperError)?,
shm.size(),
prot,
)
}
fn register_memory(
&self,
descriptor: SafeDescriptor,
size: u64,
prot: Protection,
) -> WlResult<u64> {
let mut state = self.state.borrow_mut();
let size = round_up_to_page_size(size as usize) as u64;
let source = VmMemorySource::Descriptor {
descriptor,
offset: 0,
size,
};
let alloc = Alloc::Anon(state.next_alloc);
state.next_alloc += 1;
let offset = state
.address_allocator
.allocate(size, alloc, "virtio-wl".to_owned())
.context("failed to allocate offset")
.map_err(WlError::ShmemMapperError)?;
match state
.mapper
.add_mapping(source, offset, prot, MemCacheType::CacheCoherent)
{
Ok(()) => {
state.allocs.insert(offset, alloc);
Ok(offset)
}
Err(e) => {
state
.address_allocator
.release(alloc)
.expect("corrupt address space");
Err(WlError::ShmemMapperError(e))
}
}
}
}
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
struct CtrlHeader {
type_: Le32,
flags: Le32,
}
#[repr(C)]
#[derive(Copy, Clone, Default, FromZeroes, FromBytes, AsBytes)]
struct CtrlVfdNew {
hdr: CtrlHeader,
id: Le32,
flags: Le32,
pfn: Le64,
size: Le32,
padding: Le32,
}
#[repr(C)]
#[derive(Copy, Clone, Default, FromZeroes, FromBytes)]
struct CtrlVfdNewCtxNamed {
hdr: CtrlHeader,
id: Le32,
    // Only `id` and `name` are consulted when creating a named context; the remaining
    // fields exist for wire-format compatibility and are ignored.
    flags: Le32,
    pfn: Le64,
    size: Le32,
    name: [u8; 32],
}
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
#[cfg(feature = "minigbm")]
struct CtrlVfdNewDmabuf {
hdr: CtrlHeader,
id: Le32,
flags: Le32,
pfn: Le64,
size: Le32,
width: Le32,
height: Le32,
format: Le32,
stride0: Le32,
stride1: Le32,
stride2: Le32,
offset0: Le32,
offset1: Le32,
offset2: Le32,
}
#[cfg(feature = "minigbm")]
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
struct CtrlVfdDmabufSync {
hdr: CtrlHeader,
id: Le32,
flags: Le32,
}
#[repr(C)]
#[derive(Copy, Clone, AsBytes, FromZeroes, FromBytes)]
struct CtrlVfdRecv {
hdr: CtrlHeader,
id: Le32,
vfd_count: Le32,
}
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
struct CtrlVfd {
hdr: CtrlHeader,
id: Le32,
}
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
struct CtrlVfdSend {
hdr: CtrlHeader,
id: Le32,
vfd_count: Le32,
}
#[repr(C)]
#[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
struct CtrlVfdSendVfd {
kind: Le32,
id: Le32,
}
#[repr(C)]
#[derive(Copy, Clone, FromZeroes, FromBytes)]
union CtrlVfdSendVfdV2Payload {
id: Le32,
seqno: Le64,
}
#[repr(C)]
#[derive(Copy, Clone, FromZeroes, FromBytes)]
struct CtrlVfdSendVfdV2 {
kind: Le32,
payload: CtrlVfdSendVfdV2Payload,
}
impl CtrlVfdSendVfdV2 {
fn id(&self) -> Le32 {
assert!(
self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL
|| self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
);
        // SAFETY: the assertion above guarantees `payload` was written as an `id`.
        unsafe { self.payload.id }
}
#[cfg(feature = "gpu")]
fn seqno(&self) -> Le64 {
assert!(self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE);
        // SAFETY: the assertion above guarantees `payload` was written as a `seqno`.
        unsafe { self.payload.seqno }
}
}
#[derive(Debug)]
#[allow(dead_code)]
enum WlResp<'a> {
Ok,
VfdNew {
id: u32,
flags: u32,
pfn: u64,
size: u32,
resp: bool,
},
#[cfg(feature = "minigbm")]
VfdNewDmabuf {
id: u32,
flags: u32,
pfn: u64,
size: u32,
desc: GpuMemoryDesc,
},
VfdRecv {
id: u32,
data: &'a [u8],
vfds: &'a [u32],
},
VfdHup {
id: u32,
},
Err(Box<dyn StdError>),
OutOfMemory,
InvalidId,
InvalidType,
InvalidFlags,
InvalidCommand,
}
impl<'a> WlResp<'a> {
fn get_code(&self) -> u32 {
match *self {
WlResp::Ok => VIRTIO_WL_RESP_OK,
WlResp::VfdNew { resp, .. } => {
if resp {
VIRTIO_WL_RESP_VFD_NEW
} else {
VIRTIO_WL_CMD_VFD_NEW
}
}
#[cfg(feature = "minigbm")]
WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
}
}
}
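/// A virtual file descriptor as exposed to the guest. At most one backing resource is
/// populated: a Wayland socket, guest-visible shared memory, a pipe end, or a sync fence.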
#[derive(Default)]
struct WlVfd {
socket: Option<ScmSocket<UnixStream>>,
guest_shared_memory: Option<SharedMemory>,
remote_pipe: Option<File>,
    local_pipe: Option<(u32 /* flags */, File)>,
    slot: Option<(u64 /* offset */, VmRequester)>,
#[cfg(feature = "minigbm")]
is_dmabuf: bool,
#[cfg(feature = "minigbm")]
map_info: u32,
fence: Option<File>,
is_fence: bool,
}
impl fmt::Debug for WlVfd {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "WlVfd {{")?;
if let Some(s) = &self.socket {
write!(f, " socket: {}", s.as_raw_descriptor())?;
}
if let Some((offset, _)) = &self.slot {
write!(f, " offset: {}", offset)?;
}
if let Some(s) = &self.remote_pipe {
write!(f, " remote: {}", s.as_raw_descriptor())?;
}
if let Some((_, s)) = &self.local_pipe {
write!(f, " local: {}", s.as_raw_descriptor())?;
}
write!(f, " }}")
}
}
#[cfg(feature = "minigbm")]
fn flush_shared_memory(shared_memory: &SharedMemory) -> Result<()> {
let mmap = match MemoryMappingBuilder::new(shared_memory.size as usize)
.from_shared_memory(shared_memory)
.build()
{
Ok(v) => v,
Err(_) => return Err(Error::new(EINVAL)),
};
if let Err(err) = mmap.flush_all() {
base::error!("failed to flush shared memory: {}", err);
return match err {
MmapError::NotImplemented(_) => Err(Error::new(ENOSYS)),
_ => Err(Error::new(EINVAL)),
};
}
Ok(())
}
impl WlVfd {
fn connect<P: AsRef<Path>>(path: P) -> WlResult<WlVfd> {
let socket = UnixStream::connect(path).map_err(WlError::SocketConnect)?;
let mut vfd = WlVfd::default();
vfd.socket = Some(socket.try_into().map_err(WlError::SocketConnect)?);
Ok(vfd)
}
fn allocate(vm: VmRequester, size: u64) -> WlResult<WlVfd> {
let size_page_aligned = round_up_to_page_size(size as usize) as u64;
let vfd_shm =
SharedMemory::new("virtwl_alloc", size_page_aligned).map_err(WlError::NewAlloc)?;
let offset = vm.register_shmem(&vfd_shm)?;
let mut vfd = WlVfd::default();
vfd.guest_shared_memory = Some(vfd_shm);
vfd.slot = Some((offset, vm));
Ok(vfd)
}
#[cfg(feature = "minigbm")]
fn dmabuf(
vm: VmRequester,
width: u32,
height: u32,
format: u32,
) -> WlResult<(WlVfd, GpuMemoryDesc)> {
let (offset, desc, reqs) = vm.allocate_and_register_gpu_memory(width, height, format)?;
let mut vfd = WlVfd::default();
let vfd_shm =
SharedMemory::from_safe_descriptor(desc, reqs.size).map_err(WlError::NewAlloc)?;
let mut desc = GpuMemoryDesc::default();
for i in 0..3 {
desc.planes[i] = GpuMemoryPlaneDesc {
stride: reqs.strides[i],
offset: reqs.offsets[i],
}
}
vfd.guest_shared_memory = Some(vfd_shm);
vfd.slot = Some((offset, vm));
vfd.is_dmabuf = true;
vfd.map_info = reqs.map_info;
Ok((vfd, desc))
}
#[cfg(feature = "minigbm")]
fn dmabuf_sync(&self, flags: u32) -> WlResult<()> {
if !self.is_dmabuf {
return Err(WlError::DmabufSync(io::Error::from_raw_os_error(EINVAL)));
}
match &self.guest_shared_memory {
Some(descriptor) => {
let sync = dma_buf_sync {
flags: flags as u64,
};
                // SAFETY: `descriptor` is a valid dmabuf shared memory handle and `sync` is
                // a properly initialized `dma_buf_sync`; only the return code is used.
                if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC, &sync) } < 0 {
return Err(WlError::DmabufSync(io::Error::last_os_error()));
}
const END_WRITE_MASK: u32 = DMA_BUF_SYNC_WRITE | DMA_BUF_SYNC_END;
if (flags & END_WRITE_MASK) == END_WRITE_MASK
&& (self.map_info & RUTABAGA_MAP_CACHE_MASK) != RUTABAGA_MAP_CACHE_CACHED
{
if let Err(err) = flush_shared_memory(descriptor) {
base::warn!("failed to flush cached dmabuf mapping: {:?}", err);
return Err(WlError::DmabufSync(io::Error::from_raw_os_error(
err.errno(),
)));
}
}
Ok(())
}
None => Err(WlError::DmabufSync(io::Error::from_raw_os_error(EBADF))),
}
}
fn pipe_remote_read_local_write() -> WlResult<WlVfd> {
let (read_pipe, write_pipe) = pipe().map_err(WlError::NewPipe)?;
let mut vfd = WlVfd::default();
vfd.remote_pipe = Some(read_pipe);
vfd.local_pipe = Some((VIRTIO_WL_VFD_WRITE, write_pipe));
Ok(vfd)
}
fn pipe_remote_write_local_read() -> WlResult<WlVfd> {
let (read_pipe, write_pipe) = pipe().map_err(WlError::NewPipe)?;
let mut vfd = WlVfd::default();
vfd.remote_pipe = Some(write_pipe);
vfd.local_pipe = Some((VIRTIO_WL_VFD_READ, read_pipe));
Ok(vfd)
}
fn from_file(vm: VmRequester, mut descriptor: File) -> WlResult<WlVfd> {
if descriptor.seek(SeekFrom::End(0)).is_ok() {
let shm = SharedMemory::from_file(descriptor).map_err(WlError::FromSharedMemory)?;
let offset = vm.register_shmem(&shm)?;
let mut vfd = WlVfd::default();
vfd.guest_shared_memory = Some(shm);
vfd.slot = Some((offset, vm));
Ok(vfd)
} else if is_fence(&descriptor) {
let mut vfd = WlVfd::default();
vfd.is_fence = true;
vfd.fence = Some(descriptor);
Ok(vfd)
} else {
let flags = match FileFlags::from_file(&descriptor) {
Ok(FileFlags::Read) => VIRTIO_WL_VFD_READ,
Ok(FileFlags::Write) => VIRTIO_WL_VFD_WRITE,
Ok(FileFlags::ReadWrite) => VIRTIO_WL_VFD_READ | VIRTIO_WL_VFD_WRITE,
_ => 0,
};
let mut vfd = WlVfd::default();
vfd.local_pipe = Some((flags, descriptor));
Ok(vfd)
}
}
fn flags(&self, use_transition_flags: bool) -> u32 {
let mut flags = 0;
if use_transition_flags {
if self.socket.is_some() {
flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
}
if let Some((f, _)) = self.local_pipe {
flags |= f;
}
if self.is_fence {
flags |= VIRTIO_WL_VFD_FENCE;
}
} else {
if self.socket.is_some() {
flags |= VIRTIO_WL_VFD_CONTROL;
}
if self.slot.is_some() {
                flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP;
}
}
flags
}
fn offset(&self) -> Option<u64> {
self.slot.as_ref().map(|s| s.0)
}
fn size(&self) -> Option<u64> {
self.guest_shared_memory.as_ref().map(|shm| shm.size())
}
fn send_descriptor(&self) -> Option<RawDescriptor> {
self.guest_shared_memory
.as_ref()
.map(|shm| shm.as_raw_descriptor())
.or(self.socket.as_ref().map(|s| s.as_raw_descriptor()))
.or(self.remote_pipe.as_ref().map(|p| p.as_raw_descriptor()))
.or(self.fence.as_ref().map(|f| f.as_raw_descriptor()))
}
fn wait_descriptor(&self) -> Option<&dyn AsRawDescriptor> {
self.socket
.as_ref()
.map(|s| s as &dyn AsRawDescriptor)
.or_else(|| {
self.local_pipe
.as_ref()
.map(|(_, p)| p as &dyn AsRawDescriptor)
})
.or_else(|| self.fence.as_ref().map(|f| f as &dyn AsRawDescriptor))
}
fn send(&mut self, rds: &[RawDescriptor], data: &mut Reader) -> WlResult<WlResp> {
if let Some(socket) = &self.socket {
socket
.send_vectored_with_fds(&data.get_remaining(), rds)
.map_err(WlError::SendVfd)?;
            // Everything remaining in `data` was sent above, so mark it all consumed.
            data.consume(usize::MAX);
Ok(WlResp::Ok)
} else if let Some((_, local_pipe)) = &mut self.local_pipe {
if !rds.is_empty() {
return Ok(WlResp::InvalidType);
}
data.read_to(local_pipe, usize::MAX)
.map_err(WlError::WritePipe)?;
Ok(WlResp::Ok)
} else {
Ok(WlResp::InvalidType)
}
}
fn recv(&mut self, in_file_queue: &mut Vec<File>) -> WlResult<Vec<u8>> {
if let Some(socket) = self.socket.take() {
let mut buf = vec![0; IN_BUFFER_LEN];
let (len, descriptors) = socket
.recv_with_fds(&mut buf, VIRTWL_SEND_MAX_ALLOCS)
.map_err(WlError::RecvVfd)?;
if len != 0 || !descriptors.is_empty() {
buf.truncate(len);
buf.shrink_to_fit();
self.socket = Some(socket);
in_file_queue.extend(descriptors.into_iter().map(File::from));
return Ok(buf);
}
Ok(Vec::new())
} else if let Some((flags, mut local_pipe)) = self.local_pipe.take() {
let mut buf = vec![0; IN_BUFFER_LEN];
let len = local_pipe.read(&mut buf[..]).map_err(WlError::ReadPipe)?;
if len != 0 {
buf.truncate(len);
buf.shrink_to_fit();
self.local_pipe = Some((flags, local_pipe));
return Ok(buf);
}
Ok(Vec::new())
} else {
Ok(Vec::new())
}
}
fn close_remote(&mut self) {
self.remote_pipe = None;
}
fn close(&mut self) -> WlResult<()> {
if let Some((offset, vm)) = self.slot.take() {
vm.unregister_memory(offset)?;
}
self.socket = None;
self.remote_pipe = None;
self.local_pipe = None;
Ok(())
}
}
impl Drop for WlVfd {
fn drop(&mut self) {
let _ = self.close();
}
}
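/// An item queued for delivery to the guest on the `in` queue: a newly received vfd, a data
/// payload, or a hang-up notification.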
#[derive(Debug)]
enum WlRecv {
Vfd { id: u32 },
Data { buf: Vec<u8> },
Hup,
}
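/// Host-side state of the device: the table of live vfds, the wait context used to poll
/// host descriptors, and the queue of messages pending delivery to the guest.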
pub struct WlState {
wayland_paths: BTreeMap<String, PathBuf>,
vm: VmRequester,
resource_bridge: Option<Tube>,
use_transition_flags: bool,
wait_ctx: WaitContext<u32>,
vfds: BTreeMap<u32, WlVfd>,
next_vfd_id: u32,
in_file_queue: Vec<File>,
    in_queue: VecDeque<(u32 /* vfd_id */, WlRecv)>,
current_recv_vfd: Option<u32>,
recv_vfds: Vec<u32>,
#[cfg(feature = "gpu")]
signaled_fence: Option<SafeDescriptor>,
use_send_vfd_v2: bool,
address_offset: Option<u64>,
}
impl WlState {
pub fn new(
wayland_paths: BTreeMap<String, PathBuf>,
mapper: Box<dyn SharedMemoryMapper>,
use_transition_flags: bool,
use_send_vfd_v2: bool,
resource_bridge: Option<Tube>,
#[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
address_offset: Option<u64>,
) -> WlState {
WlState {
wayland_paths,
vm: VmRequester::new(
mapper,
#[cfg(feature = "minigbm")]
gralloc,
),
resource_bridge,
wait_ctx: WaitContext::new().expect("failed to create WaitContext"),
use_transition_flags,
vfds: BTreeMap::new(),
next_vfd_id: NEXT_VFD_ID_BASE,
in_file_queue: Vec::new(),
in_queue: VecDeque::new(),
current_recv_vfd: None,
recv_vfds: Vec::new(),
#[cfg(feature = "gpu")]
signaled_fence: None,
use_send_vfd_v2,
address_offset,
}
}
pub fn wait_ctx(&self) -> &WaitContext<u32> {
&self.wait_ctx
}
fn new_pipe(&mut self, id: u32, flags: u32) -> WlResult<WlResp> {
if id & VFD_ID_HOST_MASK != 0 {
return Ok(WlResp::InvalidId);
}
if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ) != 0 {
return Ok(WlResp::InvalidFlags);
}
if flags & VIRTIO_WL_VFD_WRITE != 0 && flags & VIRTIO_WL_VFD_READ != 0 {
return Ok(WlResp::InvalidFlags);
}
match self.vfds.entry(id) {
Entry::Vacant(entry) => {
let vfd = if flags & VIRTIO_WL_VFD_WRITE != 0 {
WlVfd::pipe_remote_read_local_write()?
} else if flags & VIRTIO_WL_VFD_READ != 0 {
WlVfd::pipe_remote_write_local_read()?
} else {
return Ok(WlResp::InvalidFlags);
};
self.wait_ctx
.add(vfd.wait_descriptor().unwrap(), id)
.map_err(WlError::WaitContextAdd)?;
let resp = WlResp::VfdNew {
id,
flags: 0,
pfn: 0,
size: 0,
resp: true,
};
entry.insert(vfd);
Ok(resp)
}
Entry::Occupied(_) => Ok(WlResp::InvalidId),
}
}
fn new_alloc(&mut self, id: u32, flags: u32, size: u32) -> WlResult<WlResp> {
if id & VFD_ID_HOST_MASK != 0 {
return Ok(WlResp::InvalidId);
}
if self.use_transition_flags {
if flags != 0 {
return Ok(WlResp::InvalidFlags);
}
} else if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP) != 0 {
return Ok(WlResp::Err(Box::from("invalid flags")));
}
if self.vfds.contains_key(&id) {
return Ok(WlResp::InvalidId);
}
let vfd = WlVfd::allocate(self.vm.clone(), size as u64)?;
let resp = WlResp::VfdNew {
id,
flags,
pfn: self.compute_pfn(&vfd.offset()),
size: vfd.size().unwrap_or_default() as u32,
resp: true,
};
self.vfds.insert(id, vfd);
Ok(resp)
}
#[cfg(feature = "minigbm")]
fn new_dmabuf(&mut self, id: u32, width: u32, height: u32, format: u32) -> WlResult<WlResp> {
if id & VFD_ID_HOST_MASK != 0 {
return Ok(WlResp::InvalidId);
}
if self.vfds.contains_key(&id) {
return Ok(WlResp::InvalidId);
}
let (vfd, desc) = WlVfd::dmabuf(self.vm.clone(), width, height, format)?;
let resp = WlResp::VfdNewDmabuf {
id,
flags: 0,
pfn: self.compute_pfn(&vfd.offset()),
size: vfd.size().unwrap_or_default() as u32,
desc,
};
self.vfds.insert(id, vfd);
Ok(resp)
}
#[cfg(feature = "minigbm")]
fn dmabuf_sync(&mut self, vfd_id: u32, flags: u32) -> WlResult<WlResp> {
if flags & !(VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK) != 0 {
return Ok(WlResp::InvalidFlags);
}
match self.vfds.get_mut(&vfd_id) {
Some(vfd) => {
vfd.dmabuf_sync(flags)?;
Ok(WlResp::Ok)
}
None => Ok(WlResp::InvalidId),
}
}
fn new_context(&mut self, id: u32, name: &str) -> WlResult<WlResp> {
if id & VFD_ID_HOST_MASK != 0 {
return Ok(WlResp::InvalidId);
}
let flags = if self.use_transition_flags {
VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ
} else {
VIRTIO_WL_VFD_CONTROL
};
match self.vfds.entry(id) {
Entry::Vacant(entry) => {
let vfd = entry.insert(WlVfd::connect(
self.wayland_paths
.get(name)
.ok_or_else(|| WlError::UnknownSocketName(name.to_string()))?,
)?);
self.wait_ctx
.add(vfd.wait_descriptor().unwrap(), id)
.map_err(WlError::WaitContextAdd)?;
Ok(WlResp::VfdNew {
id,
flags,
pfn: 0,
size: 0,
resp: true,
})
}
Entry::Occupied(_) => Ok(WlResp::InvalidId),
}
}
fn process_wait_context(&mut self) {
let events = match self.wait_ctx.wait_timeout(Duration::from_secs(0)) {
Ok(v) => v,
Err(e) => {
error!("failed waiting for vfd evens: {}", e);
return;
}
};
for event in events.iter().filter(|e| e.is_readable) {
if let Err(e) = self.recv(event.token) {
error!("failed to recv from vfd: {}", e)
}
}
for event in events.iter().filter(|e| e.is_hungup) {
if !event.is_readable {
let vfd_id = event.token;
if let Some(descriptor) =
self.vfds.get(&vfd_id).and_then(|vfd| vfd.wait_descriptor())
{
if let Err(e) = self.wait_ctx.delete(descriptor) {
warn!("failed to remove hungup vfd from poll context: {}", e);
}
}
self.in_queue.push_back((vfd_id, WlRecv::Hup));
}
}
}
fn close(&mut self, vfd_id: u32) -> WlResult<WlResp> {
let mut to_delete = BTreeSet::new();
for (dest_vfd_id, q) in &self.in_queue {
if *dest_vfd_id == vfd_id {
if let WlRecv::Vfd { id } = q {
to_delete.insert(*id);
}
}
}
for vfd_id in to_delete {
let _ = self.close(vfd_id);
}
match self.vfds.remove(&vfd_id) {
Some(mut vfd) => {
self.in_queue.retain(|&(id, _)| id != vfd_id);
vfd.close()?;
Ok(WlResp::Ok)
}
None => Ok(WlResp::InvalidId),
}
}
#[cfg(feature = "gpu")]
fn get_info(&mut self, request: ResourceRequest) -> Option<SafeDescriptor> {
let sock = self.resource_bridge.as_ref().unwrap();
match get_resource_info(sock, request) {
Ok(ResourceInfo::Buffer(BufferInfo { handle, .. })) => Some(handle),
Ok(ResourceInfo::Fence { handle }) => Some(handle),
Err(ResourceBridgeError::InvalidResource(req)) => {
warn!("attempt to send non-existent gpu resource {}", req);
None
}
Err(e) => {
error!("{}", e);
self.resource_bridge = None;
None
}
}
}
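    // Forwards the remaining `reader` bytes plus up to VIRTWL_SEND_MAX_ALLOCS attached vfds
    // to the target vfd, translating each attachment (local vfd, virtgpu resource, or fence)
    // into a raw descriptor that is passed over the socket with SCM_RIGHTS.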
fn send(
&mut self,
vfd_id: u32,
vfd_count: usize,
foreign_id: bool,
reader: &mut Reader,
) -> WlResult<WlResp> {
let mut send_vfd_ids = [CtrlVfdSendVfdV2 {
kind: Le32::from(0),
payload: CtrlVfdSendVfdV2Payload { id: Le32::from(0) },
}; VIRTWL_SEND_MAX_ALLOCS];
for vfd_id in send_vfd_ids.iter_mut().take(vfd_count) {
*vfd_id = if foreign_id {
if self.use_send_vfd_v2 {
reader.read_obj().map_err(WlError::ParseDesc)?
} else {
let vfd: CtrlVfdSendVfd = reader.read_obj().map_err(WlError::ParseDesc)?;
CtrlVfdSendVfdV2 {
kind: vfd.kind,
payload: CtrlVfdSendVfdV2Payload { id: vfd.id },
}
}
} else {
CtrlVfdSendVfdV2 {
kind: Le32::from(VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL),
payload: CtrlVfdSendVfdV2Payload {
id: reader.read_obj().map_err(WlError::ParseDesc)?,
},
}
};
}
let mut rds = [0; VIRTWL_SEND_MAX_ALLOCS];
#[cfg(feature = "gpu")]
let mut bridged_files = Vec::new();
for (&send_vfd_id, descriptor) in send_vfd_ids[..vfd_count].iter().zip(rds.iter_mut()) {
match send_vfd_id.kind.to_native() {
VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL => {
match self.vfds.get(&send_vfd_id.id().to_native()) {
Some(vfd) => match vfd.send_descriptor() {
Some(vfd_fd) => *descriptor = vfd_fd,
None => return Ok(WlResp::InvalidType),
},
None => {
warn!(
"attempt to send non-existant vfd 0x{:08x}",
send_vfd_id.id().to_native()
);
return Ok(WlResp::InvalidId);
}
}
}
#[cfg(feature = "gpu")]
VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU if self.resource_bridge.is_some() => {
match self.get_info(ResourceRequest::GetBuffer {
id: send_vfd_id.id().to_native(),
}) {
Some(handle) => {
*descriptor = handle.as_raw_descriptor();
bridged_files.push(handle.into());
}
None => return Ok(WlResp::InvalidId),
}
}
#[cfg(feature = "gpu")]
VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE if self.resource_bridge.is_some() => {
match self.get_info(ResourceRequest::GetFence {
seqno: send_vfd_id.seqno().to_native(),
}) {
Some(handle) => {
*descriptor = handle.as_raw_descriptor();
bridged_files.push(handle.into());
}
None => return Ok(WlResp::InvalidId),
}
}
#[cfg(feature = "gpu")]
VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE
if self.resource_bridge.is_some() =>
{
if self.signaled_fence.is_none() {
match self.get_info(ResourceRequest::GetFence { seqno: 0 }) {
Some(handle) => self.signaled_fence = Some(handle),
None => return Ok(WlResp::InvalidId),
}
}
match self.signaled_fence.as_ref().unwrap().try_clone() {
Ok(dup) => {
*descriptor = dup.into_raw_descriptor();
                            // SAFETY: `dup.into_raw_descriptor()` above transferred sole
                            // ownership of the descriptor, so the File may take it over.
                            let file: File = unsafe {
                                base::FromRawDescriptor::from_raw_descriptor(*descriptor)
                            };
bridged_files.push(file);
}
Err(_) => return Ok(WlResp::InvalidId),
}
}
VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
| VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE
| VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE => {
let _ = self.resource_bridge.as_ref();
warn!("attempt to send foreign resource kind but feature is disabled");
}
kind => {
warn!("attempt to send unknown foreign resource kind: {}", kind);
return Ok(WlResp::InvalidId);
}
}
}
match self.vfds.get_mut(&vfd_id) {
Some(vfd) => match vfd.send(&rds[..vfd_count], reader)? {
WlResp::Ok => {}
_ => return Ok(WlResp::InvalidType),
},
None => return Ok(WlResp::InvalidId),
}
for &send_vfd_id in &send_vfd_ids[..vfd_count] {
if send_vfd_id.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL {
if let Some(vfd) = self.vfds.get_mut(&send_vfd_id.id().into()) {
vfd.close_remote();
}
}
}
Ok(WlResp::Ok)
}
fn recv(&mut self, vfd_id: u32) -> WlResult<()> {
let buf = match self.vfds.get_mut(&vfd_id) {
Some(vfd) => {
if vfd.is_fence {
if let Err(e) = self.wait_ctx.delete(vfd.wait_descriptor().unwrap()) {
warn!("failed to remove hungup vfd from poll context: {}", e);
}
self.in_queue.push_back((vfd_id, WlRecv::Hup));
return Ok(());
} else {
vfd.recv(&mut self.in_file_queue)?
}
}
None => return Ok(()),
};
if self.in_file_queue.is_empty() && buf.is_empty() {
self.in_queue.push_back((vfd_id, WlRecv::Hup));
return Ok(());
}
for file in self.in_file_queue.drain(..) {
let vfd = WlVfd::from_file(self.vm.clone(), file)?;
if let Some(wait_descriptor) = vfd.wait_descriptor() {
self.wait_ctx
.add(wait_descriptor, self.next_vfd_id)
.map_err(WlError::WaitContextAdd)?;
}
while self.vfds.contains_key(&self.next_vfd_id) {
self.next_vfd_id += 1;
}
self.vfds.insert(self.next_vfd_id, vfd);
self.in_queue.push_back((
vfd_id,
WlRecv::Vfd {
id: self.next_vfd_id,
},
));
self.next_vfd_id += 1;
}
self.in_queue.push_back((vfd_id, WlRecv::Data { buf }));
Ok(())
}
fn execute(&mut self, reader: &mut Reader) -> WlResult<WlResp> {
let type_: Le32 = reader.peek_obj::<Le32>().map_err(WlError::ParseDesc)?;
match type_.into() {
VIRTIO_WL_CMD_VFD_NEW => {
let ctrl = reader
.read_obj::<CtrlVfdNew>()
.map_err(WlError::ParseDesc)?;
self.new_alloc(ctrl.id.into(), ctrl.flags.into(), ctrl.size.into())
}
VIRTIO_WL_CMD_VFD_CLOSE => {
let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
self.close(ctrl.id.into())
}
VIRTIO_WL_CMD_VFD_SEND => {
let ctrl = reader
.read_obj::<CtrlVfdSend>()
.map_err(WlError::ParseDesc)?;
let foreign_id = false;
self.send(
ctrl.id.into(),
ctrl.vfd_count.to_native() as usize,
foreign_id,
reader,
)
}
#[cfg(feature = "gpu")]
VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID => {
let ctrl = reader
.read_obj::<CtrlVfdSend>()
.map_err(WlError::ParseDesc)?;
let foreign_id = true;
self.send(
ctrl.id.into(),
ctrl.vfd_count.to_native() as usize,
foreign_id,
reader,
)
}
VIRTIO_WL_CMD_VFD_NEW_CTX => {
let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
self.new_context(ctrl.id.into(), "")
}
VIRTIO_WL_CMD_VFD_NEW_PIPE => {
let ctrl = reader
.read_obj::<CtrlVfdNew>()
.map_err(WlError::ParseDesc)?;
self.new_pipe(ctrl.id.into(), ctrl.flags.into())
}
#[cfg(feature = "minigbm")]
VIRTIO_WL_CMD_VFD_NEW_DMABUF => {
let ctrl = reader
.read_obj::<CtrlVfdNewDmabuf>()
.map_err(WlError::ParseDesc)?;
self.new_dmabuf(
ctrl.id.into(),
ctrl.width.into(),
ctrl.height.into(),
ctrl.format.into(),
)
}
#[cfg(feature = "minigbm")]
VIRTIO_WL_CMD_VFD_DMABUF_SYNC => {
let ctrl = reader
.read_obj::<CtrlVfdDmabufSync>()
.map_err(WlError::ParseDesc)?;
self.dmabuf_sync(ctrl.id.into(), ctrl.flags.into())
}
VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED => {
let ctrl = reader
.read_obj::<CtrlVfdNewCtxNamed>()
.map_err(WlError::ParseDesc)?;
let name_len = ctrl
.name
.iter()
.position(|x| x == &0)
.unwrap_or(ctrl.name.len());
let name =
std::str::from_utf8(&ctrl.name[..name_len]).map_err(WlError::InvalidString)?;
self.new_context(ctrl.id.into(), name)
}
op_type => {
warn!("unexpected command {}", op_type);
Ok(WlResp::InvalidCommand)
}
}
}
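    // Computes the next message for the guest without consuming it. VfdNew entries are
    // surfaced first and their ids accumulate in `recv_vfds`; the Data entry that follows
    // then carries those ids alongside the payload. An entry for a different vfd first
    // flushes the recv in progress with an empty payload.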
fn next_recv(&self) -> Option<WlResp> {
if let Some(q) = self.in_queue.front() {
match *q {
(vfd_id, WlRecv::Vfd { id }) => {
if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
match self.vfds.get(&id) {
Some(vfd) => Some(WlResp::VfdNew {
id,
flags: vfd.flags(self.use_transition_flags),
pfn: self.compute_pfn(&vfd.offset()),
size: vfd.size().unwrap_or_default() as u32,
resp: false,
}),
_ => Some(WlResp::VfdNew {
id,
flags: 0,
pfn: 0,
size: 0,
resp: false,
}),
}
} else {
Some(WlResp::VfdRecv {
id: self.current_recv_vfd.unwrap(),
data: &[],
vfds: &self.recv_vfds[..],
})
}
}
(vfd_id, WlRecv::Data { ref buf }) => {
if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
Some(WlResp::VfdRecv {
id: vfd_id,
data: &buf[..],
vfds: &self.recv_vfds[..],
})
} else {
Some(WlResp::VfdRecv {
id: self.current_recv_vfd.unwrap(),
data: &[],
vfds: &self.recv_vfds[..],
})
}
}
(vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
}
} else {
None
}
}
fn pop_recv(&mut self) {
if let Some(q) = self.in_queue.front() {
match *q {
(vfd_id, WlRecv::Vfd { id }) => {
if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
self.recv_vfds.push(id);
self.current_recv_vfd = Some(vfd_id);
} else {
self.recv_vfds.clear();
self.current_recv_vfd = None;
return;
}
}
                (vfd_id, WlRecv::Data { .. }) => {
                    // Test against the in-progress recv before resetting it: if this data
                    // belongs to a different vfd, the response just sent only flushed the
                    // current recv, so leave this entry queued for the next pass.
                    let flushed_other_vfd = !(self.current_recv_vfd.is_none()
                        || self.current_recv_vfd == Some(vfd_id));
                    self.recv_vfds.clear();
                    self.current_recv_vfd = None;
                    if flushed_other_vfd {
                        return;
                    }
                }
(_, WlRecv::Hup) => {
self.recv_vfds.clear();
self.current_recv_vfd = None;
}
}
}
self.in_queue.pop_front();
}
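    // The guest addresses shared memory by page frame number: the offset within the shmem
    // region (rebased by `address_offset` when the region sits at a fixed guest address)
    // shifted right by VIRTIO_WL_PFN_SHIFT.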
fn compute_pfn(&self, offset: &Option<u64>) -> u64 {
let addr = match (offset, self.address_offset) {
(Some(o), Some(address_offset)) => o + address_offset,
(Some(o), None) => *o,
(None, Some(_)) => 0,
(None, None) => WL_SHMEM_SIZE,
};
addr >> VIRTIO_WL_PFN_SHIFT
}
}
#[derive(ThisError, Debug, PartialEq, Eq)]
#[error("no descriptors available in queue")]
pub struct DescriptorsExhausted;
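// Drains pending host-to-guest messages into available descriptor chains on the `in` queue,
// returning DescriptorsExhausted if messages remain but the queue has no free chains.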
pub fn process_in_queue(
in_queue: &mut Queue,
state: &mut WlState,
) -> ::std::result::Result<(), DescriptorsExhausted> {
state.process_wait_context();
let mut needs_interrupt = false;
let mut exhausted_queue = false;
loop {
let mut desc = if let Some(d) = in_queue.peek() {
d
} else {
exhausted_queue = true;
break;
};
let mut should_pop = false;
if let Some(in_resp) = state.next_recv() {
match encode_resp(&mut desc.writer, in_resp) {
Ok(()) => {
should_pop = true;
}
Err(e) => {
error!("failed to encode response to descriptor chain: {}", e);
}
}
let bytes_written = desc.writer.bytes_written() as u32;
needs_interrupt = true;
let desc = desc.pop();
in_queue.add_used(desc, bytes_written);
} else {
break;
}
if should_pop {
state.pop_recv();
}
}
if needs_interrupt {
in_queue.trigger_interrupt();
}
if exhausted_queue {
Err(DescriptorsExhausted)
} else {
Ok(())
}
}
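// Executes each guest command popped from the `out` queue and writes the response back into
// the same descriptor chain.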
pub fn process_out_queue(out_queue: &mut Queue, state: &mut WlState) {
let mut needs_interrupt = false;
while let Some(mut desc) = out_queue.pop() {
let resp = match state.execute(&mut desc.reader) {
Ok(r) => r,
Err(e) => WlResp::Err(Box::new(e)),
};
match encode_resp(&mut desc.writer, resp) {
Ok(()) => {}
Err(e) => {
error!("failed to encode response to descriptor chain: {}", e);
}
}
let len = desc.writer.bytes_written() as u32;
out_queue.add_used(desc, len);
needs_interrupt = true;
}
if needs_interrupt {
out_queue.trigger_interrupt();
}
}
struct Worker {
interrupt: Interrupt,
in_queue: Queue,
out_queue: Queue,
state: WlState,
}
impl Worker {
fn new(
interrupt: Interrupt,
in_queue: Queue,
out_queue: Queue,
wayland_paths: BTreeMap<String, PathBuf>,
mapper: Box<dyn SharedMemoryMapper>,
use_transition_flags: bool,
use_send_vfd_v2: bool,
resource_bridge: Option<Tube>,
#[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
address_offset: Option<u64>,
) -> Worker {
Worker {
interrupt,
in_queue,
out_queue,
state: WlState::new(
wayland_paths,
mapper,
use_transition_flags,
use_send_vfd_v2,
resource_bridge,
#[cfg(feature = "minigbm")]
gralloc,
address_offset,
),
}
}
fn run(mut self, kill_evt: Event) -> anyhow::Result<Vec<Queue>> {
#[derive(EventToken)]
enum Token {
InQueue,
OutQueue,
Kill,
State,
InterruptResample,
}
let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
(self.in_queue.event(), Token::InQueue),
(self.out_queue.event(), Token::OutQueue),
(&kill_evt, Token::Kill),
(&self.state.wait_ctx, Token::State),
])
.context("failed creating WaitContext")?;
if let Some(resample_evt) = self.interrupt.get_resample_evt() {
wait_ctx
.add(resample_evt, Token::InterruptResample)
.context("failed adding resample event to WaitContext.")?;
}
let mut watching_state_ctx = true;
'wait: loop {
let events = match wait_ctx.wait() {
Ok(v) => v,
Err(e) => {
error!("failed waiting for events: {}", e);
break;
}
};
for event in &events {
match event.token {
Token::InQueue => {
let _ = self.in_queue.event().wait();
if !watching_state_ctx {
if let Err(e) =
wait_ctx.modify(&self.state.wait_ctx, EventType::Read, Token::State)
{
error!("Failed to modify wait_ctx descriptor for WlState: {}", e);
break;
}
watching_state_ctx = true;
}
}
Token::OutQueue => {
let _ = self.out_queue.event().wait();
process_out_queue(&mut self.out_queue, &mut self.state);
}
Token::Kill => break 'wait,
Token::State => {
if let Err(DescriptorsExhausted) =
process_in_queue(&mut self.in_queue, &mut self.state)
{
if let Err(e) =
wait_ctx.modify(&self.state.wait_ctx, EventType::None, Token::State)
{
error!(
"Failed to stop watching wait_ctx descriptor for WlState: {}",
e
);
break;
}
watching_state_ctx = false;
}
}
Token::InterruptResample => {
self.interrupt.interrupt_resample();
}
}
}
}
let in_queue = self.in_queue;
let out_queue = self.out_queue;
Ok(vec![in_queue, out_queue])
}
}
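// The virtio-wl device itself. A minimal construction sketch (the feature bits and socket
// path here are placeholders; real values come from VM configuration):
//
//     let mut wayland_paths = BTreeMap::new();
//     // The empty name is the default context looked up by VIRTIO_WL_CMD_VFD_NEW_CTX.
//     wayland_paths.insert("".to_string(), PathBuf::from("/run/user/1000/wayland-0"));
//     let wl = Wl::new(base_features, wayland_paths, /* resource_bridge= */ None)?;
//
// A shared memory mapper must be installed with `set_shared_memory_mapper` before
// `activate` runs, which hands all state to a worker thread.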
pub struct Wl {
worker_thread: Option<WorkerThread<anyhow::Result<Vec<Queue>>>>,
wayland_paths: BTreeMap<String, PathBuf>,
mapper: Option<Box<dyn SharedMemoryMapper>>,
resource_bridge: Option<Tube>,
base_features: u64,
acked_features: u64,
#[cfg(feature = "minigbm")]
gralloc: Option<RutabagaGralloc>,
address_offset: Option<u64>,
}
impl Wl {
pub fn new(
base_features: u64,
wayland_paths: BTreeMap<String, PathBuf>,
resource_bridge: Option<Tube>,
) -> Result<Wl> {
Ok(Wl {
worker_thread: None,
wayland_paths,
mapper: None,
resource_bridge,
base_features,
acked_features: 0,
#[cfg(feature = "minigbm")]
gralloc: None,
address_offset: None,
})
}
}
impl VirtioDevice for Wl {
fn keep_rds(&self) -> Vec<RawDescriptor> {
let mut keep_rds = Vec::new();
if let Some(mapper) = &self.mapper {
if let Some(raw_descriptor) = mapper.as_raw_descriptor() {
keep_rds.push(raw_descriptor);
}
}
if let Some(resource_bridge) = &self.resource_bridge {
keep_rds.push(resource_bridge.as_raw_descriptor());
}
keep_rds
}
#[cfg(feature = "minigbm")]
fn on_device_sandboxed(&mut self) {
match RutabagaGralloc::new(RutabagaGrallocBackendFlags::new()) {
Ok(g) => self.gralloc = Some(g),
Err(e) => {
error!("failed to initialize gralloc {:?}", e);
}
};
}
fn device_type(&self) -> DeviceType {
DeviceType::Wl
}
fn queue_max_sizes(&self) -> &[u16] {
QUEUE_SIZES
}
fn features(&self) -> u64 {
self.base_features
| 1 << VIRTIO_WL_F_TRANS_FLAGS
| 1 << VIRTIO_WL_F_SEND_FENCES
| 1 << VIRTIO_WL_F_USE_SHMEM
}
fn ack_features(&mut self, value: u64) {
self.acked_features |= value;
}
fn activate(
&mut self,
_mem: GuestMemory,
interrupt: Interrupt,
mut queues: BTreeMap<usize, Queue>,
) -> anyhow::Result<()> {
if queues.len() != QUEUE_SIZES.len() {
return Err(anyhow!(
"expected {} queues, got {}",
QUEUE_SIZES.len(),
queues.len()
));
}
let mapper = self.mapper.take().context("missing mapper")?;
let wayland_paths = self.wayland_paths.clone();
let use_transition_flags = self.acked_features & (1 << VIRTIO_WL_F_TRANS_FLAGS) != 0;
let use_send_vfd_v2 = self.acked_features & (1 << VIRTIO_WL_F_SEND_FENCES) != 0;
let use_shmem = self.acked_features & (1 << VIRTIO_WL_F_USE_SHMEM) != 0;
let resource_bridge = self.resource_bridge.take();
#[cfg(feature = "minigbm")]
let gralloc = self
.gralloc
.take()
.expect("gralloc already passed to worker");
let address_offset = if !use_shmem {
self.address_offset
} else {
None
};
self.worker_thread = Some(WorkerThread::start("v_wl", move |kill_evt| {
Worker::new(
interrupt,
queues.pop_first().unwrap().1,
queues.pop_first().unwrap().1,
wayland_paths,
mapper,
use_transition_flags,
use_send_vfd_v2,
resource_bridge,
#[cfg(feature = "minigbm")]
gralloc,
address_offset,
)
.run(kill_evt)
}));
Ok(())
}
fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
Some(SharedMemoryRegion {
id: WL_SHMEM_ID,
length: WL_SHMEM_SIZE,
})
}
fn set_shared_memory_region_base(&mut self, shmem_base: GuestAddress) {
self.address_offset = Some(shmem_base.0);
}
fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
self.mapper = Some(mapper);
}
fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
if let Some(worker_thread) = self.worker_thread.take() {
let queues = worker_thread.stop()?;
return Ok(Some(BTreeMap::from_iter(queues.into_iter().enumerate())));
}
Ok(None)
}
fn virtio_wake(
&mut self,
device_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
) -> anyhow::Result<()> {
match device_state {
None => Ok(()),
Some((mem, interrupt, queues)) => {
self.activate(mem, interrupt, queues)?;
Ok(())
}
}
}
}