devices/virtio/video/resource.rs

1// Copyright 2021 The ChromiumOS Authors
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Resource management and resolution for the virtio-video device.
6
7use std::convert::TryInto;
8use std::fmt;
9
10use base::linux::MemoryMappingBuilderUnix;
11use base::FromRawDescriptor;
12use base::IntoRawDescriptor;
13use base::MemoryMappingArena;
14use base::MemoryMappingBuilder;
15use base::MmapError;
16use base::SafeDescriptor;
17use thiserror::Error as ThisError;
18use vm_memory::GuestAddress;
19use vm_memory::GuestMemory;
20use vm_memory::GuestMemoryError;
21use zerocopy::FromBytes;
22use zerocopy::Immutable;
23use zerocopy::IntoBytes;
24use zerocopy::KnownLayout;
25
26use crate::virtio::resource_bridge;
27use crate::virtio::resource_bridge::ResourceBridgeError;
28use crate::virtio::resource_bridge::ResourceInfo;
29use crate::virtio::resource_bridge::ResourceRequest;
30use crate::virtio::video::format::Format;
31use crate::virtio::video::format::FramePlane;
32use crate::virtio::video::params::Params;
33use crate::virtio::video::protocol::virtio_video_mem_entry;
34use crate::virtio::video::protocol::virtio_video_object_entry;
35
/// Defines how resources for a given queue are represented.
///
/// This is decided per-queue at stream creation time and tells the device how to interpret
/// the raw bytes of an [`UnresolvedResourceEntry`].
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum ResourceType {
    /// Resources are backed by guest memory pages.
    GuestPages,
    /// Resources are backed by virtio objects.
    #[default]
    VirtioObject,
}
45
#[repr(C)]
#[derive(Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
/// A guest resource entry which type is not decided yet.
///
/// The 16 raw bytes are kept as-is until the queue's `ResourceType` tells us how to interpret
/// them (presumably both protocol entry types are 16 bytes long — see `object()`).
pub struct UnresolvedResourceEntry([u8; 16]);
50
51impl fmt::Debug for UnresolvedResourceEntry {
52    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
53        write!(f, "unresolved {:?}", self.0)
54    }
55}
56
impl UnresolvedResourceEntry {
    /// Interpret this entry as a virtio object entry.
    pub fn object(&self) -> virtio_video_object_entry {
        // `unwrap` cannot fail as long as `virtio_video_object_entry` is exactly 16 bytes like
        // our storage — `read_from_bytes` only errors on a size mismatch. Presumably guaranteed
        // by the protocol definition; confirm if the protocol struct ever changes.
        virtio_video_object_entry::read_from_bytes(&self.0).unwrap()
    }
}
62
/// Trait for types that can serve as video buffer backing memory.
pub trait BufferHandle: Sized {
    /// Try to clone this handle. This must only create a new reference to the same backing memory
    /// and not duplicate the buffer itself.
    fn try_clone(&self) -> Result<Self, base::Error>;

    /// Returns a linear mapping of [`offset`..`offset`+`size`] of the memory backing this buffer.
    ///
    /// Both `offset` and `size` are in bytes; the returned arena is a single contiguous view
    /// even if the backing memory is scattered.
    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>;
}
72
/// Linear memory area of a `GuestMemHandle`
#[derive(Clone)]
pub struct GuestMemArea {
    /// Offset within the guest region to the start of the area.
    pub offset: u64,
    /// Length of the area within the memory region, in bytes.
    pub length: usize,
}
81
/// Handle to a buffer backed by (possibly scattered) guest memory pages.
pub struct GuestMemHandle {
    /// Descriptor to the guest memory region containing the buffer.
    pub desc: SafeDescriptor,
    /// Memory areas (i.e. sg list) that make the memory buffer.
    /// The order of the areas defines the linear layout of the buffer.
    pub mem_areas: Vec<GuestMemArea>,
}
88
89impl BufferHandle for GuestMemHandle {
90    fn try_clone(&self) -> Result<Self, base::Error> {
91        Ok(Self {
92            desc: self.desc.try_clone()?,
93            mem_areas: self.mem_areas.clone(),
94        })
95    }
96
97    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
98        let mut arena = MemoryMappingArena::new(size)?;
99        let mut mapped_size = 0;
100        let mut area_iter = self.mem_areas.iter();
101        let mut area_offset = offset;
102        while mapped_size < size {
103            let area = match area_iter.next() {
104                Some(area) => area,
105                None => {
106                    return Err(MmapError::InvalidRange(
107                        offset,
108                        size,
109                        self.mem_areas.iter().map(|a| a.length).sum(),
110                    ));
111                }
112            };
113            if area_offset > area.length {
114                area_offset -= area.length;
115            } else {
116                let mapping_length = std::cmp::min(area.length - area_offset, size - mapped_size);
117                arena.add_fd_offset(mapped_size, mapping_length, &self.desc, area.offset)?;
118                mapped_size += mapping_length;
119                area_offset = 0;
120            }
121        }
122        Ok(arena)
123    }
124}
125
/// Handle to a buffer backed by a virtio object (e.g. exported by virtio-gpu).
pub struct VirtioObjectHandle {
    /// Descriptor for the object.
    pub desc: SafeDescriptor,
    /// Modifier to apply to frame resources.
    /// NOTE(review): presumably a DRM format modifier sourced from the resource bridge — confirm.
    pub modifier: u64,
}
132
133impl BufferHandle for VirtioObjectHandle {
134    fn try_clone(&self) -> Result<Self, base::Error> {
135        Ok(Self {
136            desc: self.desc.try_clone()?,
137            modifier: self.modifier,
138        })
139    }
140
141    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
142        MemoryMappingBuilder::new(size)
143            .from_descriptor(&self.desc)
144            .offset(offset as u64)
145            .build()
146            .map(MemoryMappingArena::from)
147    }
148}
149
/// Backing memory of a guest resource: either scattered guest pages or a virtio object.
pub enum GuestResourceHandle {
    GuestPages(GuestMemHandle),
    VirtioObject(VirtioObjectHandle),
}
154
155impl BufferHandle for GuestResourceHandle {
156    fn try_clone(&self) -> Result<Self, base::Error> {
157        Ok(match self {
158            Self::GuestPages(handle) => Self::GuestPages(handle.try_clone()?),
159            Self::VirtioObject(handle) => Self::VirtioObject(handle.try_clone()?),
160        })
161    }
162
163    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
164        match self {
165            GuestResourceHandle::GuestPages(handle) => handle.get_mapping(offset, size),
166            GuestResourceHandle::VirtioObject(handle) => handle.get_mapping(offset, size),
167        }
168    }
169}
170
/// A fully resolved guest resource: backing memory plus the frame layout metadata needed to
/// use it as a video buffer.
pub struct GuestResource {
    /// Handle to the backing memory.
    pub handle: GuestResourceHandle,
    /// Layout of color planes, if the resource will receive frames.
    pub planes: Vec<FramePlane>,
    /// Frame width in pixels.
    pub width: u32,
    /// Frame height in pixels.
    pub height: u32,
    /// Format of the frames stored in this resource.
    pub format: Format,
    /// Whether the buffer can be accessed by the guest CPU. This means the host must ensure that
    /// all operations on the buffer are completed before passing it to the guest.
    pub guest_cpu_mappable: bool,
}
183
/// Errors that can occur while resolving a guest-pages resource from virtio memory entries.
#[derive(Debug, ThisError)]
pub enum GuestMemResourceCreationError {
    #[error("Provided slice of entries is empty")]
    NoEntriesProvided,
    #[error("cannot get shm region: {0}")]
    CantGetShmRegion(GuestMemoryError),
    #[error("cannot get shm offset: {0}")]
    CantGetShmOffset(GuestMemoryError),
    #[error("error while cloning shm region descriptor: {0}")]
    DescriptorCloneError(base::Error),
    // All entries of a resource must be backed by the same shm object.
    #[error("guest memory with multiple shm objects not supported")]
    MultipleShmObjects,
}
197
/// Errors that can occur while resolving a virtio object resource.
#[derive(Debug, ThisError)]
pub enum ObjectResourceCreationError {
    // NOTE(review): `{0:08}` zero-pads the *decimal* value; `{0:08x}` (hex) was possibly
    // intended for a UUID — confirm before changing the message.
    #[error("uuid {0:08} is larger than 32 bits")]
    UuidNot32Bits(u128),
    #[error("resource returned by bridge is not a buffer")]
    NotABuffer,
    #[error("resource bridge failure: {0}")]
    ResourceBridgeFailure(ResourceBridgeError),
}
207
208impl GuestResource {
209    /// Try to convert an unresolved virtio guest memory entry into a resolved guest memory
210    /// resource.
211    ///
212    /// Convert `mem_entry` into the guest memory resource it represents and resolve it through
213    /// `mem`.
214    /// Width, height and format is set from `params`.
215    ///
216    /// Panics if `params.format` is `None`.
217    pub fn from_virtio_guest_mem_entry(
218        mem_entries: &[virtio_video_mem_entry],
219        mem: &GuestMemory,
220        params: &Params,
221    ) -> Result<GuestResource, GuestMemResourceCreationError> {
222        let region_desc = match mem_entries.first() {
223            None => return Err(GuestMemResourceCreationError::NoEntriesProvided),
224            Some(entry) => {
225                let addr: u64 = entry.addr.into();
226
227                mem.shm_region(GuestAddress(addr))
228                    .map_err(GuestMemResourceCreationError::CantGetShmRegion)?
229            }
230        };
231
232        let mem_areas = mem_entries
233            .iter()
234            .map(|entry| {
235                let addr: u64 = entry.addr.into();
236                let length: u32 = entry.length.into();
237                let (backing_obj, region_offset) = mem
238                    .offset_from_base(GuestAddress(addr))
239                    .map_err(GuestMemResourceCreationError::CantGetShmOffset)
240                    .unwrap();
241                if region_desc.as_raw_descriptor() != backing_obj.as_raw_descriptor() {
242                    return Err(GuestMemResourceCreationError::MultipleShmObjects);
243                }
244
245                Ok(GuestMemArea {
246                    offset: region_offset,
247                    length: length as usize,
248                })
249            })
250            .collect::<Result<_, _>>()?;
251
252        let handle = GuestResourceHandle::GuestPages(GuestMemHandle {
253            desc: base::clone_descriptor(region_desc)
254                .map_err(GuestMemResourceCreationError::DescriptorCloneError)?,
255            mem_areas,
256        });
257
258        // The plane information can be computed from the currently set format.
259        let mut buffer_offset = 0;
260        let planes = params
261            .plane_formats
262            .iter()
263            .map(|p| {
264                let plane_offset = buffer_offset;
265                buffer_offset += p.plane_size;
266
267                FramePlane {
268                    offset: plane_offset as usize,
269                    stride: p.stride as usize,
270                    size: p.plane_size as usize,
271                }
272            })
273            .collect();
274
275        Ok(GuestResource {
276            handle,
277            planes,
278            width: params.frame_width,
279            height: params.frame_height,
280            format: params.format.unwrap(),
281            guest_cpu_mappable: true,
282        })
283    }
284
285    /// Try to convert an unresolved virtio object entry into a resolved object resource.
286    ///
287    /// Convert `object` into the object resource it represents and resolve it through `res_bridge`.
288    /// Returns an error if the object's UUID is invalid or cannot be resolved to a buffer object
289    /// by `res_bridge`.
290    pub fn from_virtio_object_entry(
291        object: virtio_video_object_entry,
292        res_bridge: &base::Tube,
293        params: &Params,
294    ) -> Result<GuestResource, ObjectResourceCreationError> {
295        // We trust that the caller has chosen the correct object type.
296        let uuid = u128::from_be_bytes(object.uuid);
297
298        // TODO(stevensd): `Virtio3DBackend::resource_assign_uuid` is currently implemented to use
299        // 32-bits resource_handles as UUIDs. Once it starts using real UUIDs, we need to update
300        // this conversion.
301        let handle = TryInto::<u32>::try_into(uuid)
302            .map_err(|_| ObjectResourceCreationError::UuidNot32Bits(uuid))?;
303
304        let buffer_info = match resource_bridge::get_resource_info(
305            res_bridge,
306            ResourceRequest::GetBuffer { id: handle },
307        ) {
308            Ok(ResourceInfo::Buffer(buffer_info)) => buffer_info,
309            Ok(_) => return Err(ObjectResourceCreationError::NotABuffer),
310            Err(e) => return Err(ObjectResourceCreationError::ResourceBridgeFailure(e)),
311        };
312
313        let handle = GuestResourceHandle::VirtioObject(VirtioObjectHandle {
314            // SAFETY:
315            // Safe because `buffer_info.file` is a valid file descriptor and we are stealing
316            // it.
317            desc: unsafe {
318                SafeDescriptor::from_raw_descriptor(buffer_info.handle.into_raw_descriptor())
319            },
320            modifier: buffer_info.modifier,
321        });
322
323        // TODO(ishitatsuyuki): Right now, there are two sources of metadata: through the
324        //                      virtio_video_params fields, or through the buffer metadata provided
325        //                      by the VirtioObject backend.
326        //                      Unfortunately neither is sufficient. The virtio_video_params struct
327        //                      lacks the plane offset, while some virtio-gpu backend doesn't
328        //                      have information about the plane size, or in some cases even the
329        //                      overall frame width and height.
330        //                      We will mix-and-match metadata from the more reliable data source
331        //                      below; ideally this should be fixed to use single source of truth.
332        let planes = params
333            .plane_formats
334            .iter()
335            .zip(&buffer_info.planes)
336            .map(|(param, buffer)| FramePlane {
337                // When the virtio object backend was implemented, the buffer and stride was sourced
338                // from the object backend's metadata (`buffer`). To lean on the safe side, we'll
339                // keep using data from `buffer`, even in case of stride it's also provided by
340                // `param`.
341                offset: buffer.offset as usize,
342                stride: buffer.stride as usize,
343                size: param.plane_size as usize,
344            })
345            .collect();
346
347        Ok(GuestResource {
348            handle,
349            planes,
350            width: params.frame_width,
351            height: params.frame_height,
352            format: params.format.unwrap(),
353            guest_cpu_mappable: buffer_info.guest_cpu_mappable,
354        })
355    }
356
357    #[cfg(feature = "video-encoder")]
358    pub fn try_clone(&self) -> Result<Self, base::Error> {
359        Ok(Self {
360            handle: self.handle.try_clone()?,
361            planes: self.planes.clone(),
362            width: self.width,
363            height: self.height,
364            format: self.format,
365            guest_cpu_mappable: self.guest_cpu_mappable,
366        })
367    }
368}
369
#[cfg(test)]
mod tests {
    use base::MappedRegion;
    use base::SharedMemory;

    use super::*;

    /// Creates a sparse guest memory handle using as many pages as there are entries in
    /// `page_order`. The page with index `0` will be the first page, `1` will be the second page,
    /// etc.
    ///
    /// The memory handle is filled with increasing u32s starting from page 0, then page 1, and so
    /// on. Finally the handle is mapped into a linear space and we check that the written integers
    /// appear in the expected order.
    ///
    /// NOTE(review): only `offset == 0` is exercised; a nonzero-offset mapping is not covered
    /// by these tests.
    fn check_guest_mem_handle(page_order: &[usize]) {
        const PAGE_SIZE: usize = 0x1000;
        const U32_SIZE: usize = std::mem::size_of::<u32>();
        const ENTRIES_PER_PAGE: usize = PAGE_SIZE / std::mem::size_of::<u32>();

        // Fill a vector of the same size as the handle with u32s of increasing value, following
        // the page layout given as argument.
        let mut data = vec![0u8; PAGE_SIZE * page_order.len()];
        for (page_index, page) in page_order.iter().enumerate() {
            // `page` is the physical position of the `page_index`th logical page.
            let page_slice = &mut data[(page * PAGE_SIZE)..((page + 1) * PAGE_SIZE)];
            for (index, chunk) in page_slice.chunks_exact_mut(4).enumerate() {
                let sized_chunk: &mut [u8; 4] = chunk.try_into().unwrap();
                *sized_chunk = (((page_index * ENTRIES_PER_PAGE) + index) as u32).to_ne_bytes();
            }
        }

        // Copy the initialized vector's content into an anonymous shared memory.
        let mem = SharedMemory::new("data-dest", data.len() as u64).unwrap();
        let mapping = MemoryMappingBuilder::new(mem.size() as usize)
            .from_shared_memory(&mem)
            .build()
            .unwrap();
        assert_eq!(mapping.write_slice(&data, 0).unwrap(), data.len());

        // Create the `GuestMemHandle` we will try to map and retrieve the data from.
        // Each area is one full page, located at its physical position in the shared memory.
        let mem_handle = GuestResourceHandle::GuestPages(GuestMemHandle {
            desc: base::clone_descriptor(&mem).unwrap(),
            mem_areas: page_order
                .iter()
                .map(|&page| GuestMemArea {
                    offset: page as u64 * PAGE_SIZE as u64,
                    length: PAGE_SIZE,
                })
                .collect(),
        });

        // Map the handle into a linear memory area, retrieve its data into a new vector, and check
        // that its u32s appear to increase linearly.
        let mapping = mem_handle.get_mapping(0, mem.size() as usize).unwrap();
        let mut data = vec![0u8; PAGE_SIZE * page_order.len()];
        // SAFETY: src and dst are valid and aligned
        unsafe { std::ptr::copy_nonoverlapping(mapping.as_ptr(), data.as_mut_ptr(), data.len()) };
        for (index, chunk) in data.chunks_exact(U32_SIZE).enumerate() {
            let sized_chunk: &[u8; 4] = chunk.try_into().unwrap();
            assert_eq!(u32::from_ne_bytes(*sized_chunk), index as u32);
        }
    }

    // Fill a guest memory handle with a single memory page.
    // Then check that the data can be properly mapped and appears in the expected order.
    #[test]
    fn test_single_guest_mem_handle() {
        check_guest_mem_handle(&[0])
    }

    // Fill a guest memory handle with 4 memory pages that are contiguous.
    // Then check that the pages appear in the expected order in the mapping.
    #[test]
    fn test_linear_guest_mem_handle() {
        check_guest_mem_handle(&[0, 1, 2, 3])
    }

    // Fill a guest memory handle with 8 pages mapped in non-linear order.
    // Then check that the pages appear in the expected order in the mapping.
    #[test]
    fn test_sparse_guest_mem_handle() {
        check_guest_mem_handle(&[1, 7, 6, 3, 5, 0, 4, 2])
    }
}