vm_memory/guest_memory.rs

1// Copyright 2017 The ChromiumOS Authors
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Track memory regions that are mapped to the guest VM.
6
7use std::convert::AsRef;
8use std::convert::TryFrom;
9use std::fs::File;
10use std::io::Read;
11use std::io::Write;
12use std::marker::Send;
13use std::marker::Sync;
14use std::result;
15use std::sync::Arc;
16
17use anyhow::bail;
18use anyhow::Context;
19use base::pagesize;
20use base::AsRawDescriptor;
21use base::AsRawDescriptors;
22use base::Error as SysError;
23use base::MappedRegion;
24use base::MemoryMapping;
25use base::MemoryMappingBuilder;
26use base::MmapError;
27use base::RawDescriptor;
28use base::SharedMemory;
29use base::VolatileMemory;
30use base::VolatileMemoryError;
31use base::VolatileSlice;
32use cros_async::mem;
33use cros_async::BackingMemory;
34use remain::sorted;
35use serde::Deserialize;
36use serde::Serialize;
37use serde_keyvalue::FromKeyValues;
38use snapshot::AnySnapshot;
39use thiserror::Error;
40use zerocopy::FromBytes;
41use zerocopy::Immutable;
42use zerocopy::IntoBytes;
43
44use crate::guest_address::GuestAddress;
45
46mod sys;
47pub use sys::MemoryPolicy;
48
49#[sorted]
50#[derive(Error, Debug)]
51pub enum Error {
52    #[error("failed to map guest memory to file: {0}")]
53    FiledBackedMemoryMappingFailed(#[source] MmapError),
54    #[error("failed to open file for file backed mapping: {0}")]
55    FiledBackedOpenFailed(#[source] std::io::Error),
56    #[error("invalid guest address {0}")]
57    InvalidGuestAddress(GuestAddress),
58    #[error("invalid guest range at {0} of size {1}")]
59    InvalidGuestRange(GuestAddress, u64),
60    #[error("invalid offset {0}")]
61    InvalidOffset(u64),
62    #[error("size {0} must not be zero")]
63    InvalidSize(usize),
64    #[error("invalid guest memory access at addr={0}: {1}")]
65    MemoryAccess(GuestAddress, #[source] MmapError),
66    #[error("failed to set seals on shm region: {0}")]
67    MemoryAddSealsFailed(#[source] SysError),
68    #[error("failed to create shm region: {0}")]
69    MemoryCreationFailed(#[source] SysError),
70    #[error("failed to map guest memory: {0}")]
71    MemoryMappingFailed(#[source] MmapError),
72    #[error("guest memory region {0}+{1:#x} is not page aligned")]
73    MemoryNotAligned(GuestAddress, u64),
74    #[error("memory regions overlap")]
75    MemoryRegionOverlap,
76    #[error("memory region size {0} is too large")]
77    MemoryRegionTooLarge(u128),
78    #[error("punch hole failed {0}")]
79    PunchHole(#[source] base::Error),
80    #[error("incomplete read of {completed} instead of {expected} bytes")]
81    ShortRead { expected: usize, completed: usize },
82    #[error("incomplete write of {completed} instead of {expected} bytes")]
83    ShortWrite { expected: usize, completed: usize },
84    #[error("DescriptorChain split is out of bounds: {0}")]
85    SplitOutOfBounds(usize),
86    #[error("{0}")]
87    VolatileMemoryAccess(#[source] VolatileMemoryError),
88}
89
90pub type Result<T> = result::Result<T, Error>;
91
92/// A file-like object backing `MemoryRegion`.
93#[derive(Clone, Debug)]
94pub enum BackingObject {
95    Shm(Arc<SharedMemory>),
96    File(Arc<File>),
97}
98
99impl AsRawDescriptor for BackingObject {
100    fn as_raw_descriptor(&self) -> RawDescriptor {
101        match self {
102            Self::Shm(shm) => shm.as_raw_descriptor(),
103            Self::File(f) => f.as_raw_descriptor(),
104        }
105    }
106}
107
108impl AsRef<dyn AsRawDescriptor + Sync + Send> for BackingObject {
109    fn as_ref(&self) -> &(dyn AsRawDescriptor + Sync + Send + 'static) {
110        match self {
111            BackingObject::Shm(shm) => shm.as_ref(),
112            BackingObject::File(f) => f.as_ref(),
113        }
114    }
115}
116
117/// Information about a memory region, as yielded by `GuestMemory::regions`.
118pub struct MemoryRegionInformation<'a> {
119    pub index: usize,
120    pub guest_addr: GuestAddress,
121    pub size: usize,
122    pub host_addr: usize,
123    pub shm: &'a BackingObject,
124    pub shm_offset: u64,
125    pub options: MemoryRegionOptions,
126}
127
128#[sorted]
129#[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Eq, Ord)]
130pub enum MemoryRegionPurpose {
131    /// BIOS/firmware ROM
132    Bios,
133
134    /// General purpose guest memory
135    #[default]
136    GuestMemoryRegion,
137
138    /// PVMFW
139    ProtectedFirmwareRegion,
140
141    /// An area that should be backed by a GuestMemory region but reported as reserved to the
142    /// guest.
143    ReservedMemory,
144
145    #[cfg(target_arch = "aarch64")]
146    StaticSwiotlbRegion,
147}
148
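/// Parameters describing a region of guest memory that is backed by a file on the host, typically
/// deserialized from a `key=value` command-line string (keys follow the serde renames below).
///
/// A hand-constructed sketch of such a value (the path and numbers are illustrative):
///
/// ```
/// # use vm_memory::guest_memory::FileBackedMappingParameters;
/// let params = FileBackedMappingParameters {
///     path: "/path/to/file".into(),
///     address: 0x8000_0000,
///     size: 0x10000,
///     offset: 0,
///     writable: true,
///     sync: false,
///     align: false,
///     ram: true,
/// };
/// assert!(params.writable);
/// ```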
149#[derive(Clone, Debug, Serialize, Deserialize, FromKeyValues, PartialEq, Eq, PartialOrd, Ord)]
150#[serde(deny_unknown_fields)]
151pub struct FileBackedMappingParameters {
152    pub path: std::path::PathBuf,
153    #[serde(rename = "addr")]
154    pub address: u64,
155    pub size: u64,
156    #[serde(default)]
157    pub offset: u64,
158    #[serde(rename = "rw", default)]
159    pub writable: bool,
160    #[serde(default)]
161    pub sync: bool,
162    #[serde(default)]
163    pub align: bool,
164    /// Whether the mapping is for RAM or MMIO.
165    #[serde(default)]
166    pub ram: bool,
167}
168
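/// Per-region configuration used when building a `GuestMemory`.
///
/// A minimal builder-style sketch (the 2 MiB alignment is illustrative):
///
/// ```
/// # use vm_memory::guest_memory::{MemoryRegionOptions, MemoryRegionPurpose};
/// let opts = MemoryRegionOptions::new()
///     .purpose(MemoryRegionPurpose::ReservedMemory)
///     .align(2 * 1024 * 1024);
/// assert_eq!(opts.align, 2 * 1024 * 1024);
/// ```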
169#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
170pub struct MemoryRegionOptions {
171    /// Some hypervisors (presently: Gunyah) need explicit knowledge about
172    /// which memory region is used for protected firmware, static swiotlb,
173    /// or general purpose guest memory.
174    pub purpose: MemoryRegionPurpose,
175    /// Alignment for the mapping of this region. This is intended to be used for
176    /// arm64 KVM support, where block alignment is required for transparent
177    /// huge-page support.
178    pub align: u64,
179    /// Backing file params.
180    pub file_backed: Option<FileBackedMappingParameters>,
181}
182
183impl MemoryRegionOptions {
184    pub fn new() -> MemoryRegionOptions {
185        Default::default()
186    }
187
188    pub fn purpose(mut self, purpose: MemoryRegionPurpose) -> Self {
189        self.purpose = purpose;
190        self
191    }
192
193    pub fn align(mut self, alignment: u64) -> Self {
194        self.align = alignment;
195        self
196    }
197
198    pub fn file_backed(mut self, params: FileBackedMappingParameters) -> Self {
199        self.file_backed = Some(params);
200        self
201    }
202}
203
204/// A region of memory-mapped memory.
205/// Holds the memory mapping with its offset in guest memory.
206/// Also holds the backing object for the mapping and the offset in that object of the mapping.
207#[derive(Debug)]
208pub struct MemoryRegion {
209    mapping: MemoryMapping,
210    guest_base: GuestAddress,
211
212    shared_obj: BackingObject,
213    obj_offset: u64,
214
215    options: MemoryRegionOptions,
216}
217
218impl MemoryRegion {
219    /// Creates a new MemoryRegion using the given SharedMemory object to later be attached to a VM
220    /// at the `guest_base` address in the guest.
221    pub fn new_from_shm(
222        size: u64,
223        guest_base: GuestAddress,
224        offset: u64,
225        shm: Arc<SharedMemory>,
226    ) -> Result<Self> {
227        let mapping = MemoryMappingBuilder::new(size as usize)
228            .from_shared_memory(shm.as_ref())
229            .offset(offset)
230            .build()
231            .map_err(Error::MemoryMappingFailed)?;
232        Ok(MemoryRegion {
233            mapping,
234            guest_base,
235            shared_obj: BackingObject::Shm(shm),
236            obj_offset: offset,
237            options: Default::default(),
238        })
239    }
240
241    /// Creates a new MemoryRegion using the given file to later be attached to a VM at the
242    /// `guest_base` address in the guest.
243    pub fn new_from_file(
244        size: u64,
245        guest_base: GuestAddress,
246        offset: u64,
247        file: Arc<File>,
248    ) -> Result<Self> {
249        let mapping = MemoryMappingBuilder::new(size as usize)
250            .from_file(&file)
251            .offset(offset)
252            .build()
253            .map_err(Error::MemoryMappingFailed)?;
254        Ok(MemoryRegion {
255            mapping,
256            guest_base,
257            shared_obj: BackingObject::File(file),
258            obj_offset: offset,
259            options: Default::default(),
260        })
261    }
262
263    fn start(&self) -> GuestAddress {
264        self.guest_base
265    }
266
267    fn end(&self) -> GuestAddress {
268        // unchecked_add is safe as the region bounds were checked when it was created.
269        self.guest_base.unchecked_add(self.mapping.size() as u64)
270    }
271
272    fn contains(&self, addr: GuestAddress) -> bool {
273        addr >= self.guest_base && addr < self.end()
274    }
275
276    #[cfg(any(target_os = "android", target_os = "linux"))] // unused on windows
277    fn contains_range(&self, addr: GuestAddress, size: u64) -> bool {
278        let Some(end_addr) = addr.checked_add(size) else {
279            return false;
280        };
281        addr >= self.guest_base && end_addr <= self.end()
282    }
283}
284
285/// Tracks memory regions and where they are mapped in the guest, along with shm
286/// descriptors of the underlying memory regions.
287#[derive(Clone, Debug)]
288pub struct GuestMemory {
289    regions: Arc<[MemoryRegion]>,
290    locked: bool,
291    use_punchhole_locked: bool,
292}
293
294impl AsRawDescriptors for GuestMemory {
295    /// USE WITH CAUTION, the descriptors returned here are not necessarily
296    /// files!
297    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
298        self.regions
299            .iter()
300            .map(|r| r.shared_obj.as_raw_descriptor())
301            .collect()
302    }
303}
304
305impl GuestMemory {
306    /// Creates backing shm for GuestMemory regions
307    fn create_shm(ranges: &[(GuestAddress, u64, MemoryRegionOptions)]) -> Result<SharedMemory> {
308        let mut aligned_size = 0;
309        let pg_size = pagesize();
310        for range in ranges {
311            if range.2.file_backed.is_some() {
312                // Regions with a backing file don't use part of the `SharedMemory`.
313                continue;
314            }
315            if range.1 % pg_size as u64 != 0 {
316                return Err(Error::MemoryNotAligned(range.0, range.1));
317            }
318
319            aligned_size += range.1;
320        }
321
322        // NOTE: Some tests rely on the GuestMemory's name when capturing metrics.
323        let name = "crosvm_guest";
324        // Shm must be mut even though it is only updated on Unix systems.
325        #[allow(unused_mut)]
326        let mut shm = SharedMemory::new(name, aligned_size).map_err(Error::MemoryCreationFailed)?;
327
328        sys::finalize_shm(&mut shm)?;
329
330        Ok(shm)
331    }
332
333    /// Creates a container for guest memory regions.
334    /// Valid memory regions are specified as a slice of (Address, Size, MemoryRegionOptions) tuples.
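    ///
    /// A minimal sketch (addresses and sizes are illustrative):
    ///
    /// ```
    /// # use vm_memory::guest_memory::MemoryRegionOptions;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_new_with_options() -> Result<(), ()> {
    ///     let gm = GuestMemory::new_with_options(&[
    ///         (GuestAddress(0x0), 0x10000, MemoryRegionOptions::new()),
    ///         (GuestAddress(0x10000), 0x10000, MemoryRegionOptions::new()),
    ///     ])
    ///     .map_err(|_| ())?;
    ///     assert_eq!(gm.num_regions(), 2);
    ///     Ok(())
    /// # }
    /// ```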
335    pub fn new_with_options(
336        ranges: &[(GuestAddress, u64, MemoryRegionOptions)],
337    ) -> Result<GuestMemory> {
338        // Create shm
339        let shm = Arc::new(GuestMemory::create_shm(ranges)?);
340
341        // Create memory regions
342        let mut regions = Vec::<MemoryRegion>::new();
343        let mut shm_offset = 0;
344
345        for range in ranges {
346            if let Some(last) = regions.last() {
347                if last
348                    .guest_base
349                    .checked_add(last.mapping.size() as u64)
350                    .is_none_or(|a| a > range.0)
351                {
352                    return Err(Error::MemoryRegionOverlap);
353                }
354            }
355
356            let size = usize::try_from(range.1)
357                .map_err(|_| Error::MemoryRegionTooLarge(range.1 as u128))?;
358            if let Some(file_backed) = &range.2.file_backed {
359                assert_eq!(usize::try_from(file_backed.size).unwrap(), size);
360                let file = file_backed.open().map_err(Error::FiledBackedOpenFailed)?;
361                let mapping = MemoryMappingBuilder::new(size)
362                    .from_file(&file)
363                    .offset(file_backed.offset)
364                    .align(range.2.align)
365                    .protection(if file_backed.writable {
366                        base::Protection::read_write()
367                    } else {
368                        base::Protection::read()
369                    })
370                    .build()
371                    .map_err(Error::FiledBackedMemoryMappingFailed)?;
372                regions.push(MemoryRegion {
373                    mapping,
374                    guest_base: range.0,
375                    shared_obj: BackingObject::File(Arc::new(file)),
376                    obj_offset: file_backed.offset,
377                    options: range.2.clone(),
378                });
379            } else {
380                let mapping = MemoryMappingBuilder::new(size)
381                    .from_shared_memory(shm.as_ref())
382                    .offset(shm_offset)
383                    .align(range.2.align)
384                    .build()
385                    .map_err(Error::MemoryMappingFailed)?;
386                regions.push(MemoryRegion {
387                    mapping,
388                    guest_base: range.0,
389                    shared_obj: BackingObject::Shm(shm.clone()),
390                    obj_offset: shm_offset,
391                    options: range.2.clone(),
392                });
393                shm_offset += size as u64;
394            }
395        }
396
397        Ok(GuestMemory {
398            regions: Arc::from(regions),
399            locked: false,
400            use_punchhole_locked: false,
401        })
402    }
403
404    /// Creates a container for guest memory regions.
405    /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address.
406    pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
407        GuestMemory::new_with_options(
408            ranges
409                .iter()
410                .map(|(addr, size)| (*addr, *size, Default::default()))
411                .collect::<Vec<(GuestAddress, u64, MemoryRegionOptions)>>()
412                .as_slice(),
413        )
414    }
415
416    /// Creates a `GuestMemory` from a collection of MemoryRegions.
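    ///
    /// A minimal sketch using a shared-memory-backed region (the name and sizes are illustrative):
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use base::SharedMemory;
    /// # use vm_memory::guest_memory::MemoryRegion;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_from_regions() -> Result<(), ()> {
    ///     let shm = Arc::new(SharedMemory::new("example", 0x10000).map_err(|_| ())?);
    ///     let region = MemoryRegion::new_from_shm(0x10000, GuestAddress(0x0), 0, shm)
    ///         .map_err(|_| ())?;
    ///     let gm = GuestMemory::from_regions(vec![region]).map_err(|_| ())?;
    ///     assert_eq!(gm.memory_size(), 0x10000);
    ///     Ok(())
    /// # }
    /// ```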
417    pub fn from_regions(mut regions: Vec<MemoryRegion>) -> Result<Self> {
418        // Sort the regions and ensure they do not overlap.
419        regions.sort_by(|a, b| a.guest_base.cmp(&b.guest_base));
420
421        if regions.len() > 1 {
422            let mut prev_end = regions[0]
423                .guest_base
424                .checked_add(regions[0].mapping.size() as u64)
425                .ok_or(Error::MemoryRegionOverlap)?;
426            for region in &regions[1..] {
427                if prev_end > region.guest_base {
428                    return Err(Error::MemoryRegionOverlap);
429                }
430                prev_end = region
431                    .guest_base
432                    .checked_add(region.mapping.size() as u64)
433                    .ok_or(Error::MemoryRegionTooLarge(
434                        region.guest_base.0 as u128 + region.mapping.size() as u128,
435                    ))?;
436            }
437        }
438
439        Ok(GuestMemory {
440            regions: Arc::from(regions),
441            locked: false,
442            use_punchhole_locked: false,
443        })
444    }
445
446    // Whether `MemoryPolicy::LOCK_GUEST_MEMORY` was set.
447    pub fn locked(&self) -> bool {
448        self.locked
449    }
450
451    // Whether `MemoryPolicy::USE_PUNCHHOLE_LOCKED` was set.
452    pub fn use_punchhole_locked(&self) -> bool {
453        self.use_punchhole_locked
454    }
455
456    /// Returns the end address of memory.
457    ///
458    /// # Examples
459    ///
460    /// ```
461    /// # use base::MemoryMapping;
462    /// # use vm_memory::{GuestAddress, GuestMemory};
463    /// # fn test_end_addr() -> Result<(), ()> {
464    ///     let start_addr = GuestAddress(0x1000);
465    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
466    ///     assert_eq!(start_addr.checked_add(0x400), Some(gm.end_addr()));
467    ///     Ok(())
468    /// # }
469    /// ```
470    pub fn end_addr(&self) -> GuestAddress {
471        self.regions
472            .iter()
473            .max_by_key(|region| region.start())
474            .map_or(GuestAddress(0), MemoryRegion::end)
475    }
476
477    /// Returns the total size of memory in bytes.
478    pub fn memory_size(&self) -> u64 {
479        self.regions
480            .iter()
481            .map(|region| region.mapping.size() as u64)
482            .sum()
483    }
484
485    /// Returns true if the given address is within the memory range available to the guest.
486    pub fn address_in_range(&self, addr: GuestAddress) -> bool {
487        self.regions.iter().any(|region| region.contains(addr))
488    }
489
490    /// Returns true if the given range (start, end) overlaps with the memory range
491    /// available to the guest.
492    pub fn range_overlap(&self, start: GuestAddress, end: GuestAddress) -> bool {
493        self.regions
494            .iter()
495            .any(|region| region.start() < end && start < region.end())
496    }
497
498    /// Returns an address `addr + offset` if it's in range.
499    ///
500    /// This function doesn't care whether a region `[addr, addr + offset)` is in range or not. To
501    /// guarantee it's a valid range, use `is_valid_range()` instead.
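    ///
    /// A short sketch with a single region (addresses are illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_checked_offset() -> Result<(), ()> {
    /// #   let gm = GuestMemory::new(&[(GuestAddress(0x10000), 0x10000)]).map_err(|_| ())?;
    ///     assert_eq!(
    ///         gm.checked_offset(GuestAddress(0x10000), 0x8000),
    ///         Some(GuestAddress(0x18000))
    ///     );
    ///     // 0x10000 + 0x10000 lands just past the end of the region, so it is rejected.
    ///     assert_eq!(gm.checked_offset(GuestAddress(0x10000), 0x10000), None);
    ///     Ok(())
    /// # }
    /// ```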
502    pub fn checked_offset(&self, addr: GuestAddress, offset: u64) -> Option<GuestAddress> {
503        addr.checked_add(offset).and_then(|a| {
504            if self.address_in_range(a) {
505                Some(a)
506            } else {
507                None
508            }
509        })
510    }
511
512    /// Returns true if the given range `[start, start + length)` is a valid contiguous memory
513    /// range available to the guest and is backed by a single underlying memory region.
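    ///
    /// A short sketch with two adjacent regions (addresses are illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_is_valid_range() -> Result<(), ()> {
    /// #   let gm =
    /// #       GuestMemory::new(&[(GuestAddress(0x0), 0x10000), (GuestAddress(0x10000), 0x10000)])
    /// #           .map_err(|_| ())?;
    ///     // Entirely inside the first region.
    ///     assert!(gm.is_valid_range(GuestAddress(0x1000), 0x1000));
    ///     // Spans both regions, so it is not backed by a single mapping.
    ///     assert!(!gm.is_valid_range(GuestAddress(0x8000), 0x10000));
    ///     Ok(())
    /// # }
    /// ```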
514    pub fn is_valid_range(&self, start: GuestAddress, length: u64) -> bool {
515        if length == 0 {
516            return false;
517        }
518
519        let end = if let Some(end) = start.checked_add(length - 1) {
520            end
521        } else {
522            return false;
523        };
524
525        self.regions
526            .iter()
527            .any(|region| region.start() <= start && end < region.end())
528    }
529
530    /// Returns the number of memory regions.
531    pub fn num_regions(&self) -> u64 {
532        self.regions.len() as u64
533    }
534
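    /// Returns an iterator describing each region of this `GuestMemory` along with its backing
    /// object and offset.
    ///
    /// A short sketch (the region layout is illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_regions() -> Result<(), ()> {
    /// #   let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).map_err(|_| ())?;
    ///     for region in gm.regions() {
    ///         println!(
    ///             "region {} at {:?} is {} bytes",
    ///             region.index, region.guest_addr, region.size
    ///         );
    ///     }
    ///     Ok(())
    /// # }
    /// ```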
535    pub fn regions(&self) -> impl Iterator<Item = MemoryRegionInformation> {
536        self.regions
537            .iter()
538            .enumerate()
539            .map(|(index, region)| MemoryRegionInformation {
540                index,
541                guest_addr: region.start(),
542                size: region.mapping.size(),
543                host_addr: region.mapping.as_ptr() as usize,
544                shm: &region.shared_obj,
545                shm_offset: region.obj_offset,
546                options: region.options.clone(),
547            })
548    }
549
550    /// Writes a slice to guest memory at the specified guest address.
551    /// Returns the number of bytes written.  The number of bytes written can
552    /// be less than the length of the slice if there isn't enough room in the
553    /// memory region.
554    ///
555    /// # Examples
556    /// * Write a slice at guest address 0x1200.
557    ///
558    /// ```
559    /// # use base::MemoryMapping;
560    /// # use vm_memory::{GuestAddress, GuestMemory};
561    /// # fn test_write_u64() -> Result<(), ()> {
562    /// #   let start_addr = GuestAddress(0x1000);
563    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
564    ///     let res = gm.write_at_addr(&[1,2,3,4,5], GuestAddress(0x1200)).map_err(|_| ())?;
565    ///     assert_eq!(5, res);
566    ///     Ok(())
567    /// # }
568    /// ```
569    pub fn write_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
570        let (mapping, offset, _) = self.find_region(guest_addr)?;
571        mapping
572            .write_slice(buf, offset)
573            .map_err(|e| Error::MemoryAccess(guest_addr, e))
574    }
575
576    /// Writes the entire contents of a slice to guest memory at the specified
577    /// guest address.
578    ///
579    /// Returns an error if there isn't enough room in the memory region to
580    /// complete the entire write. Part of the data may have been written
581    /// nevertheless.
582    ///
583    /// # Examples
584    ///
585    /// ```
586    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
587    ///
588    /// fn test_write_all() -> guest_memory::Result<()> {
589    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
590    ///     let gm = GuestMemory::new(ranges)?;
591    ///     gm.write_all_at_addr(b"zyxwvut", GuestAddress(0x1200))
592    /// }
593    /// ```
594    pub fn write_all_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<()> {
595        let expected = buf.len();
596        let completed = self.write_at_addr(buf, guest_addr)?;
597        if expected == completed {
598            Ok(())
599        } else {
600            Err(Error::ShortWrite {
601                expected,
602                completed,
603            })
604        }
605    }
606
607    /// Reads into a slice from guest memory at the specified guest address.
608    /// Returns the number of bytes read.  The number of bytes read can
609    /// be less than the length of the slice if there isn't enough room in the
610    /// memory region.
611    ///
612    /// # Examples
613    /// * Read a slice of length 16 at guest address 0x1200.
614    ///
615    /// ```
616    /// # use base::MemoryMapping;
617    /// # use vm_memory::{GuestAddress, GuestMemory};
618    /// # fn test_write_u64() -> Result<(), ()> {
619    /// #   let start_addr = GuestAddress(0x1000);
620    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
621    ///     let buf = &mut [0u8; 16];
622    ///     let res = gm.read_at_addr(buf, GuestAddress(0x1200)).map_err(|_| ())?;
623    ///     assert_eq!(16, res);
624    ///     Ok(())
625    /// # }
626    /// ```
627    pub fn read_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<usize> {
628        let (mapping, offset, _) = self.find_region(guest_addr)?;
629        mapping
630            .read_slice(buf, offset)
631            .map_err(|e| Error::MemoryAccess(guest_addr, e))
632    }
633
634    /// Reads from guest memory at the specified address to fill the entire
635    /// buffer.
636    ///
637    /// Returns an error if there isn't enough room in the memory region to fill
638    /// the entire buffer. Part of the buffer may have been filled nevertheless.
639    ///
640    /// # Examples
641    ///
642    /// ```
643    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
644    ///
645    /// fn test_read_exact() -> guest_memory::Result<()> {
646    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
647    ///     let gm = GuestMemory::new(ranges)?;
648    ///     let mut buffer = [0u8; 0x200];
649    ///     gm.read_exact_at_addr(&mut buffer, GuestAddress(0x1200))
650    /// }
651    /// ```
652    pub fn read_exact_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<()> {
653        let expected = buf.len();
654        let completed = self.read_at_addr(buf, guest_addr)?;
655        if expected == completed {
656            Ok(())
657        } else {
658            Err(Error::ShortRead {
659                expected,
660                completed,
661            })
662        }
663    }
664
665    /// Reads an object from guest memory at the given guest address.
666    ///
667    /// # Examples
668    /// * Read a u64 from two areas of guest memory backed by separate mappings.
669    ///
670    /// ```
671    /// # use vm_memory::{GuestAddress, GuestMemory};
672    /// # fn test_read_u64() -> Result<u64, ()> {
673    /// #     let start_addr1 = GuestAddress(0x0);
674    /// #     let start_addr2 = GuestAddress(0x400);
675    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
676    /// #         .map_err(|_| ())?;
677    ///       let num1: u64 = gm.read_obj_from_addr(GuestAddress(32)).map_err(|_| ())?;
678    ///       let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x400+32)).map_err(|_| ())?;
679    /// #     Ok(num1 + num2)
680    /// # }
681    /// ```
682    pub fn read_obj_from_addr<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
683        let (mapping, offset, _) = self.find_region(guest_addr)?;
684        mapping
685            .read_obj(offset)
686            .map_err(|e| Error::MemoryAccess(guest_addr, e))
687    }
688
689    /// Reads an object from guest memory at the given guest address.
690    /// Reading from a volatile area isn't strictly safe as it could change
691    /// mid-read.  However, as long as the type T is plain old data and can
692    /// handle random initialization, everything will be OK.
693    ///
694    /// The read operation will be volatile, i.e. it will not be reordered by
695    /// the compiler and is suitable for I/O, but must be aligned. When reading
696    /// from regular memory, prefer [`GuestMemory::read_obj_from_addr`].
697    ///
698    /// # Examples
699    /// * Read a u64 from two areas of guest memory backed by separate mappings.
700    ///
701    /// ```
702    /// # use vm_memory::{GuestAddress, GuestMemory};
703    /// # fn test_read_u64() -> Result<u64, ()> {
704    /// #     let start_addr1 = GuestAddress(0x0);
705    /// #     let start_addr2 = GuestAddress(0x400);
706    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
707    /// #         .map_err(|_| ())?;
708    ///       let num1: u64 = gm.read_obj_from_addr_volatile(GuestAddress(32)).map_err(|_| ())?;
709    ///       let num2: u64 = gm.read_obj_from_addr_volatile(GuestAddress(0x400+32)).map_err(|_| ())?;
710    /// #     Ok(num1 + num2)
711    /// # }
712    /// ```
713    pub fn read_obj_from_addr_volatile<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
714        let (mapping, offset, _) = self.find_region(guest_addr)?;
715        mapping
716            .read_obj_volatile(offset)
717            .map_err(|e| Error::MemoryAccess(guest_addr, e))
718    }
719
720    /// Writes an object to the memory region at the specified guest address.
721    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
722    ///
723    /// # Examples
724    /// * Write a u64 at guest address 0x1100.
725    ///
726    /// ```
727    /// # use vm_memory::{GuestAddress, GuestMemory};
728    /// # fn test_write_u64() -> Result<(), ()> {
729    /// #   let start_addr = GuestAddress(0x1000);
730    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
731    ///     gm.write_obj_at_addr(55u64, GuestAddress(0x1100))
732    ///         .map_err(|_| ())
733    /// # }
734    /// ```
735    pub fn write_obj_at_addr<T: IntoBytes + Immutable>(
736        &self,
737        val: T,
738        guest_addr: GuestAddress,
739    ) -> Result<()> {
740        let (mapping, offset, _) = self.find_region(guest_addr)?;
741        mapping
742            .write_obj(val, offset)
743            .map_err(|e| Error::MemoryAccess(guest_addr, e))
744    }
745
746    /// Writes an object to the memory region at the specified guest address.
747    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
748    ///
749    /// The write operation will be volatile, i.e. it will not be reordered by
750    /// the compiler and is suitable for I/O, but must be aligned. When writing
751    /// to regular memory, prefer [`GuestMemory::write_obj_at_addr`].
752    /// # Examples
753    /// * Write a u64 at guest address 0x1100.
754    ///
755    /// ```
756    /// # use vm_memory::{GuestAddress, GuestMemory};
757    /// # fn test_write_u64() -> Result<(), ()> {
758    /// #   let start_addr = GuestAddress(0x1000);
759    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
760    ///     gm.write_obj_at_addr_volatile(55u64, GuestAddress(0x1100))
761    ///         .map_err(|_| ())
762    /// # }
763    /// ```
764    pub fn write_obj_at_addr_volatile<T: IntoBytes + Immutable>(
765        &self,
766        val: T,
767        guest_addr: GuestAddress,
768    ) -> Result<()> {
769        let (mapping, offset, _) = self.find_region(guest_addr)?;
770        mapping
771            .write_obj_volatile(val, offset)
772            .map_err(|e| Error::MemoryAccess(guest_addr, e))
773    }
774
775    /// Returns a `VolatileSlice` of `len` bytes starting at `addr`. Returns an error if the slice
776    /// is not a subset of this `GuestMemory`.
777    ///
778    /// # Examples
779    /// * Write `99` to 30 bytes starting at guest address 0x1010.
780    ///
781    /// ```
782    /// # use base::MemoryMapping;
783    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
784    /// # fn test_volatile_slice() -> Result<(), GuestMemoryError> {
785    /// #   let start_addr = GuestAddress(0x1000);
786    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
787    ///     let vslice = gm.get_slice_at_addr(GuestAddress(0x1010), 30)?;
788    ///     vslice.write_bytes(99);
789    /// #   Ok(())
790    /// # }
791    /// ```
792    pub fn get_slice_at_addr(&self, addr: GuestAddress, len: usize) -> Result<VolatileSlice> {
793        self.regions
794            .iter()
795            .find(|region| region.contains(addr))
796            .ok_or(Error::InvalidGuestAddress(addr))
797            .and_then(|region| {
798                // The cast to a usize is safe here because we know that `region.contains(addr)` and
799                // it's not possible for a memory region to be larger than what fits in a usize.
800                region
801                    .mapping
802                    .get_slice(addr.offset_from(region.start()) as usize, len)
803                    .map_err(Error::VolatileMemoryAccess)
804            })
805    }
806    /// Convert a GuestAddress into a pointer in the address space of this
807    /// process. This should only be necessary for giving addresses to the
808    /// kernel, as with vhost ioctls. Normal reads/writes to guest memory should
809    /// be done through `write_obj_at_addr`, `read_obj_from_addr`, etc.
810    ///
811    /// # Arguments
812    /// * `guest_addr` - Guest address to convert.
813    ///
814    /// # Examples
815    ///
816    /// ```
817    /// # use vm_memory::{GuestAddress, GuestMemory};
818    /// # fn test_host_addr() -> Result<(), ()> {
819    ///     let start_addr = GuestAddress(0x1000);
820    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
821    ///     let addr = gm.get_host_address(GuestAddress(0x1200)).unwrap();
822    ///     println!("Host address is {:p}", addr);
823    ///     Ok(())
824    /// # }
825    /// ```
826    pub fn get_host_address(&self, guest_addr: GuestAddress) -> Result<*const u8> {
827        let (mapping, offset, _) = self.find_region(guest_addr)?;
828        Ok(
829            // SAFETY:
830            // This is safe; `find_region` already checks that offset is in
831            // bounds.
832            unsafe { mapping.as_ptr().add(offset) } as *const u8,
833        )
834    }
835
836    /// Convert a GuestAddress into a pointer in the address space of this
837    /// process, and verify that the provided size defines a valid range within
838    /// a single memory region. Similar to get_host_address(), this should only
839    /// be used for giving addresses to the kernel.
840    ///
841    /// # Arguments
842    /// * `guest_addr` - Guest address to convert.
843    /// * `size` - Size of the address range to be converted.
844    ///
845    /// # Examples
846    ///
847    /// ```
848    /// # use vm_memory::{GuestAddress, GuestMemory};
849    /// # fn test_host_addr() -> Result<(), ()> {
850    ///     let start_addr = GuestAddress(0x1000);
851    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
852    ///     let addr = gm.get_host_address_range(GuestAddress(0x1200), 0x200).unwrap();
853    ///     println!("Host address is {:p}", addr);
854    ///     Ok(())
855    /// # }
856    /// ```
857    pub fn get_host_address_range(
858        &self,
859        guest_addr: GuestAddress,
860        size: usize,
861    ) -> Result<*const u8> {
862        if size == 0 {
863            return Err(Error::InvalidSize(size));
864        }
865
866        // Assume no overlap among regions
867        let (mapping, offset, _) = self.find_region(guest_addr)?;
868
869        if mapping.size().checked_sub(offset).is_none_or(|v| v < size) {
870            return Err(Error::InvalidGuestAddress(guest_addr));
871        }
872
873        Ok(
874            //SAFETY:
875            // This is safe; `find_region` already checks that offset is in
876            // bounds.
877            unsafe { mapping.as_ptr().add(offset) } as *const u8,
878        )
879    }
880
881    /// Returns a reference to the backing object of the region that contains the given address.
882    pub fn shm_region(
883        &self,
884        guest_addr: GuestAddress,
885    ) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
886        self.regions
887            .iter()
888            .find(|region| region.contains(guest_addr))
889            .ok_or(Error::InvalidGuestAddress(guest_addr))
890            .map(|region| region.shared_obj.as_ref())
891    }
892
893    /// Returns the region that contains the memory at `offset` from the base of guest memory.
894    pub fn offset_region(&self, offset: u64) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
895        self.shm_region(
896            self.checked_offset(self.regions[0].guest_base, offset)
897                .ok_or(Error::InvalidOffset(offset))?,
898        )
899    }
900
901    /// Loops over all guest memory regions of `self`, and returns the
902    /// target region that contains `guest_addr`. On success, this
903    /// function returns a tuple with the following fields:
904    ///
905    /// (i) the memory mapping associated with the target region.
906    /// (ii) the relative offset from the start of the target region to `guest_addr`.
907    /// (iii) the absolute offset from the start of the backing object to the target region.
908    ///
909    /// If no target region is found, an error is returned.
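    ///
    /// A short sketch (addresses are illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_find_region() -> Result<(), ()> {
    /// #   let gm = GuestMemory::new(&[(GuestAddress(0x10000), 0x10000)]).map_err(|_| ())?;
    ///     let (_mapping, offset, _obj_offset) =
    ///         gm.find_region(GuestAddress(0x12000)).map_err(|_| ())?;
    ///     assert_eq!(offset, 0x2000);
    ///     Ok(())
    /// # }
    /// ```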
910    pub fn find_region(&self, guest_addr: GuestAddress) -> Result<(&MemoryMapping, usize, u64)> {
911        self.regions
912            .iter()
913            .find(|region| region.contains(guest_addr))
914            .ok_or(Error::InvalidGuestAddress(guest_addr))
915            .map(|region| {
916                (
917                    &region.mapping,
918                    guest_addr.offset_from(region.start()) as usize,
919                    region.obj_offset,
920                )
921            })
922    }
923
924    /// Convert a GuestAddress into an offset within the associated shm region.
925    ///
926    /// A `GuestMemory` may have multiple backing objects and the offset is
927    /// only meaningful in relation to the associated backing object, so a
928    /// reference to it is included in the return value.
929    ///
930    /// Due to potential gaps within GuestMemory, it is helpful to know the
931    /// offset within the shm where a given address is found. This offset
932    /// can then be passed to another process mapping the shm to read data
933    /// starting at that address.
934    ///
935    /// # Arguments
936    /// * `guest_addr` - Guest address to convert.
937    ///
938    /// # Examples
939    ///
940    /// ```
941    /// # use vm_memory::{GuestAddress, GuestMemory};
942    /// let addr_a = GuestAddress(0x10000);
943    /// let addr_b = GuestAddress(0x80000);
944    /// let mut gm = GuestMemory::new(&vec![
945    ///     (addr_a, 0x20000),
946    ///     (addr_b, 0x30000)]).expect("failed to create GuestMemory");
947    /// let (_backing_object, offset) = gm.offset_from_base(GuestAddress(0x95000))
948    ///                .expect("failed to get offset");
949    /// assert_eq!(offset, 0x35000);
950    /// ```
951    pub fn offset_from_base(
952        &self,
953        guest_addr: GuestAddress,
954    ) -> Result<(&(dyn AsRawDescriptor + Send + Sync), u64)> {
955        self.regions
956            .iter()
957            .find(|region| region.contains(guest_addr))
958            .ok_or(Error::InvalidGuestAddress(guest_addr))
959            .map(|region| {
960                (
961                    region.shared_obj.as_ref(),
962                    region.obj_offset + guest_addr.offset_from(region.start()),
963                )
964            })
965    }
966
967    /// Copy all guest memory into `w`.
968    ///
969    /// # Safety
970    /// Must have exclusive access to the guest memory for the duration of the
971    /// call (e.g. all vCPUs and devices must be stopped).
972    ///
973    /// Returns a JSON object that contains metadata about the underlying memory regions to allow
974    /// validation checks at restore time.
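    ///
    /// A usage sketch (not compiled here; `gm` is an existing `GuestMemory` and the caller has
    /// already stopped all vCPUs and devices):
    ///
    /// ```ignore
    /// let mut file = std::fs::File::create("mem.snapshot")?;
    /// // SAFETY: all vCPUs and devices are stopped.
    /// let metadata = unsafe { gm.snapshot(&mut file, /* compress= */ true)? };
    /// ```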
975    #[deny(unsafe_op_in_unsafe_fn)]
976    pub unsafe fn snapshot<T: Write>(
977        &self,
978        w: &mut T,
979        compress: bool,
980    ) -> anyhow::Result<AnySnapshot> {
981        fn go(
982            this: &GuestMemory,
983            w: &mut impl Write,
984        ) -> anyhow::Result<Vec<MemoryRegionSnapshotMetadata>> {
985            let mut regions = Vec::new();
986            for region in this.regions.iter() {
987                let data_ranges = region
988                    .find_data_ranges()
989                    .context("find_data_ranges failed")?;
990                for range in &data_ranges {
991                    let region_vslice = region
992                        .mapping
993                        .get_slice(range.start, range.end - range.start)?;
994                    // SAFETY:
995                    // 1. The data is guaranteed to be present & of expected length by the
996                    //    `VolatileSlice`.
997                    // 2. Aliasing the `VolatileSlice`'s memory is safe because a. The only mutable
998                    //    reference to it is held by the guest, and the guest's VCPUs are stopped
999                    //    (guaranteed by caller), so that mutable reference can be ignored (aliasing
1000                    //    is only an issue if temporal overlap occurs, and it does not here). b.
1001                    //    Some host code does manipulate guest memory through raw pointers. This
1002                    //    aliases the underlying memory of the slice, so we must ensure that host
1003                    //    code is not running (the caller guarantees this).
1004                    w.write_all(unsafe {
1005                        std::slice::from_raw_parts(region_vslice.as_ptr(), region_vslice.size())
1006                    })?;
1007                }
1008                regions.push(MemoryRegionSnapshotMetadata {
1009                    guest_base: region.guest_base.0,
1010                    size: region.mapping.size(),
1011                    data_ranges,
1012                });
1013            }
1014            Ok(regions)
1015        }
1016
1017        let regions = if compress {
1018            let mut w = lz4_flex::frame::FrameEncoder::new(w);
1019            let regions = go(self, &mut w)?;
1020            w.finish()?;
1021            regions
1022        } else {
1023            go(self, w)?
1024        };
1025
1026        AnySnapshot::to_any(MemorySnapshotMetadata {
1027            regions,
1028            compressed: compress,
1029        })
1030    }
1031
1032    /// Restore the guest memory using the bytes from `r`.
1033    ///
1034    /// # Safety
1035    /// Must have exclusive access to the guest memory for the duration of the
1036    /// call (e.g. all vCPUs and devices must be stopped).
1037    ///
1038    /// Returns an error if `metadata` doesn't match the configuration of the `GuestMemory` or if
1039    /// `r` doesn't produce exactly as many bytes as needed.
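    ///
    /// A usage sketch (not compiled here; `metadata` and `file` come from a previous `snapshot`
    /// call and the caller has already stopped all vCPUs and devices):
    ///
    /// ```ignore
    /// // SAFETY: all vCPUs and devices are stopped.
    /// unsafe { gm.restore(metadata, &mut file)? };
    /// ```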
1040    #[deny(unsafe_op_in_unsafe_fn)]
1041    pub unsafe fn restore<T: Read>(&self, metadata: AnySnapshot, r: &mut T) -> anyhow::Result<()> {
1042        let metadata: MemorySnapshotMetadata = AnySnapshot::from_any(metadata)?;
1043
1044        let mut r: Box<dyn Read> = if metadata.compressed {
1045            Box::new(lz4_flex::frame::FrameDecoder::new(r))
1046        } else {
1047            Box::new(r)
1048        };
1049
1050        if self.regions.len() != metadata.regions.len() {
1051            bail!(
1052                "snapshot expected {} memory regions but VM has {}",
1053                metadata.regions.len(),
1054                self.regions.len()
1055            );
1056        }
1057        for (region, metadata) in self.regions.iter().zip(metadata.regions.iter()) {
1058            let MemoryRegionSnapshotMetadata {
1059                guest_base,
1060                size,
1061                data_ranges,
1062            } = metadata;
1063            if region.guest_base.0 != *guest_base || region.mapping.size() != *size {
1064                bail!("snapshot memory regions don't match VM memory regions");
1065            }
1066
1067            let mut prev_end = 0;
1068            for range in data_ranges {
1069                let hole_size = range
1070                    .start
1071                    .checked_sub(prev_end)
1072                    .context("invalid data range")?;
1073                if hole_size > 0 {
1074                    region.zero_range(prev_end, hole_size)?;
1075                }
1076                let region_vslice = region
1077                    .mapping
1078                    .get_slice(range.start, range.end - range.start)?;
1079
1080                // SAFETY:
1081                // See `Self::snapshot` for the detailed safety statement, and
1082                // note that both mutable and non-mutable aliasing is safe.
1083                r.read_exact(unsafe {
1084                    std::slice::from_raw_parts_mut(region_vslice.as_mut_ptr(), region_vslice.size())
1085                })?;
1086
1087                prev_end = range.end;
1088            }
1089            let hole_size = region
1090                .mapping
1091                .size()
1092                .checked_sub(prev_end)
1093                .context("invalid data range")?;
1094            if hole_size > 0 {
1095                region.zero_range(prev_end, hole_size)?;
1096            }
1097        }
1098
1099        // Should always be at EOF at this point.
1100        let mut buf = [0];
1101        if r.read(&mut buf)? != 0 {
1102            bail!("too many bytes");
1103        }
1104
1105        Ok(())
1106    }
1107}
1108
1109#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
1110struct MemorySnapshotMetadata {
1111    regions: Vec<MemoryRegionSnapshotMetadata>,
1112    compressed: bool,
1113}
1114
1115#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
1116struct MemoryRegionSnapshotMetadata {
1117    guest_base: u64,
1118    size: usize,
1119    // Ranges of the mmap that are stored in the snapshot file. All other ranges of the region are
1120    // zeros.
1121    data_ranges: Vec<std::ops::Range<usize>>,
1122}
1123
1124// SAFETY:
1125// It is safe to implement BackingMemory because GuestMemory can be mutated any time already.
1126unsafe impl BackingMemory for GuestMemory {
1127    fn get_volatile_slice(
1128        &self,
1129        mem_range: cros_async::MemRegion,
1130    ) -> mem::Result<VolatileSlice<'_>> {
1131        self.get_slice_at_addr(GuestAddress(mem_range.offset), mem_range.len)
1132            .map_err(|_| mem::Error::InvalidOffset(mem_range.offset, mem_range.len))
1133    }
1134}
1135
1136#[cfg(test)]
1137mod tests {
1138    use super::*;
1139
1140    #[test]
1141    fn test_alignment() {
1142        let start_addr1 = GuestAddress(0x0);
1143        let start_addr2 = GuestAddress(0x10000);
1144
1145        assert!(GuestMemory::new(&[(start_addr1, 0x100), (start_addr2, 0x400)]).is_err());
1146        assert!(GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).is_ok());
1147    }
1148
1149    #[test]
1150    fn two_regions() {
1151        let start_addr1 = GuestAddress(0x0);
1152        let start_addr2 = GuestAddress(0x10000);
1153        // The memory regions are `[0x0, 0x10000)`, `[0x10000, 0x20000)`.
1154        let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();
1155
1156        // Although each address in `[0x0, 0x20000)` is valid, `is_valid_range()` returns false for
1157        // a range that is across multiple underlying regions.
1158        assert!(gm.is_valid_range(GuestAddress(0x5000), 0x5000));
1159        assert!(gm.is_valid_range(GuestAddress(0x10000), 0x5000));
1160        assert!(!gm.is_valid_range(GuestAddress(0x5000), 0x10000));
1161    }
1162
1163    #[test]
1164    fn overlap_memory() {
1165        let start_addr1 = GuestAddress(0x0);
1166        let start_addr2 = GuestAddress(0x10000);
1167        assert!(GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).is_err());
1168    }
1169
1170    #[test]
1171    fn region_hole() {
1172        let start_addr1 = GuestAddress(0x0);
1173        let start_addr2 = GuestAddress(0x40000);
1174        // The memory regions are `[0x0, 0x20000)`, `[0x40000, 0x60000)`.
1175        let gm = GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).unwrap();
1176
1177        assert!(gm.address_in_range(GuestAddress(0x10000)));
1178        assert!(!gm.address_in_range(GuestAddress(0x30000)));
1179        assert!(gm.address_in_range(GuestAddress(0x50000)));
1180        assert!(!gm.address_in_range(GuestAddress(0x60000)));
1181        assert!(!gm.address_in_range(GuestAddress(0x60000)));
1182        assert!(gm.range_overlap(GuestAddress(0x10000), GuestAddress(0x30000)),);
1183        assert!(!gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x40000)),);
1184        assert!(gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x70000)),);
1185        assert_eq!(gm.checked_offset(GuestAddress(0x10000), 0x10000), None);
1186        assert_eq!(
1187            gm.checked_offset(GuestAddress(0x50000), 0x8000),
1188            Some(GuestAddress(0x58000))
1189        );
1190        assert_eq!(gm.checked_offset(GuestAddress(0x50000), 0x10000), None);
1191        assert!(gm.is_valid_range(GuestAddress(0x0), 0x10000));
1192        assert!(gm.is_valid_range(GuestAddress(0x0), 0x20000));
1193        assert!(!gm.is_valid_range(GuestAddress(0x0), 0x20000 + 1));
1194
1195        // While `checked_offset(GuestAddress(0x10000), 0x40000)` succeeds because 0x50000 is a
1196        // valid address, `is_valid_range(GuestAddress(0x10000), 0x40000)` returns `false`
1197        // because there is a hole inside of [0x10000, 0x50000).
1198        assert_eq!(
1199            gm.checked_offset(GuestAddress(0x10000), 0x40000),
1200            Some(GuestAddress(0x50000))
1201        );
1202        assert!(!gm.is_valid_range(GuestAddress(0x10000), 0x40000));
1203    }
1204
1205    #[test]
1206    fn test_read_u64() {
1207        let start_addr1 = GuestAddress(0x0);
1208        let start_addr2 = GuestAddress(0x10000);
1209        let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();
1210
1211        let val1: u64 = 0xaa55aa55aa55aa55;
1212        let val2: u64 = 0x55aa55aa55aa55aa;
1213        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
1214        gm.write_obj_at_addr(val2, GuestAddress(0x10000 + 32))
1215            .unwrap();
1216        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
1217        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x10000 + 32)).unwrap();
1218        assert_eq!(val1, num1);
1219        assert_eq!(val2, num2);
1220    }
1221
1222    #[test]
1223    fn test_memory_size() {
1224        let start_region1 = GuestAddress(0x0);
1225        let size_region1 = 0x10000;
1226        let start_region2 = GuestAddress(0x10000);
1227        let size_region2 = 0x20000;
1228        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
1229            .unwrap();
1230
1231        let mem_size = gm.memory_size();
1232        assert_eq!(mem_size, size_region1 + size_region2);
1233    }
1234
1235    // Get the base address of the mapping for a GuestAddress.
1236    fn get_mapping(mem: &GuestMemory, addr: GuestAddress) -> Result<*const u8> {
1237        Ok(mem.find_region(addr)?.0.as_ptr() as *const u8)
1238    }
1239
1240    #[test]
1241    fn guest_to_host() {
1242        let start_addr1 = GuestAddress(0x0);
1243        let start_addr2 = GuestAddress(0x10000);
1244        let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();
1245
1246        // Verify the host addresses match what we expect from the mappings.
1247        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
1248        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
1249        let host_addr1 = mem.get_host_address(start_addr1).unwrap();
1250        let host_addr2 = mem.get_host_address(start_addr2).unwrap();
1251        assert_eq!(host_addr1, addr1_base);
1252        assert_eq!(host_addr2, addr2_base);
1253
1254        // Check that a bad address returns an error.
1255        let bad_addr = GuestAddress(0x123456);
1256        assert!(mem.get_host_address(bad_addr).is_err());
1257    }
1258
1259    #[test]
1260    fn guest_to_host_range() {
1261        let start_addr1 = GuestAddress(0x0);
1262        let start_addr2 = GuestAddress(0x10000);
1263        let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();
1264
1265        // Verify the host addresses match what we expect from the mappings.
1266        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
1267        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
1268        let host_addr1 = mem.get_host_address_range(start_addr1, 0x10000).unwrap();
1269        let host_addr2 = mem.get_host_address_range(start_addr2, 0x10000).unwrap();
1270        assert_eq!(host_addr1, addr1_base);
1271        assert_eq!(host_addr2, addr2_base);
1272
1273        let host_addr3 = mem.get_host_address_range(start_addr2, 0x20000).unwrap();
1274        assert_eq!(host_addr3, addr2_base);
1275
1276        // Check that a valid guest address with an invalid size returns an error.
1277        assert!(mem.get_host_address_range(start_addr1, 0x20000).is_err());
1278
1279        // Check that a bad address returns an error.
1280        let bad_addr = GuestAddress(0x123456);
1281        assert!(mem.get_host_address_range(bad_addr, 0x10000).is_err());
1282    }
1283
1284    #[test]
1285    fn shm_offset() {
1286        let start_region1 = GuestAddress(0x0);
1287        let size_region1 = 0x10000;
1288        let start_region2 = GuestAddress(0x10000);
1289        let size_region2 = 0x20000;
1290        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
1291            .unwrap();
1292
1293        gm.write_obj_at_addr(0x1337u16, GuestAddress(0x0)).unwrap();
1294        gm.write_obj_at_addr(0x0420u16, GuestAddress(0x10000))
1295            .unwrap();
1296
1297        for region in gm.regions() {
1298            let shm = match region.shm {
1299                BackingObject::Shm(s) => s,
1300                _ => {
1301                    panic!("backing object isn't SharedMemory");
1302                }
1303            };
1304            let mmap = MemoryMappingBuilder::new(region.size)
1305                .from_shared_memory(shm)
1306                .offset(region.shm_offset)
1307                .build()
1308                .unwrap();
1309
1310            if region.index == 0 {
1311                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);
1312            }
1313
1314            if region.index == 1 {
1315                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x0420u16);
1316            }
1317        }
1318    }
1319
1320    #[test]
1321    // Disabled for non-x86 because test infra uses qemu-user, which doesn't support MADV_REMOVE.
1322    #[cfg(target_arch = "x86_64")]
1323    fn snapshot_restore() {
1324        let regions = &[
1325            // Hole at start.
1326            (GuestAddress(0x0), 0x10000),
1327            // Hole at end.
1328            (GuestAddress(0x10000), 0x10000),
1329            // Hole in middle.
1330            (GuestAddress(0x20000), 0x10000),
1331            // All holes.
1332            (GuestAddress(0x30000), 0x10000),
1333            // No holes.
1334            (GuestAddress(0x40000), 0x1000),
1335        ];
1336        let writes = &[
1337            (GuestAddress(0x0FFF0), 1u64),
1338            (GuestAddress(0x10000), 2u64),
1339            (GuestAddress(0x29000), 3u64),
1340            (GuestAddress(0x40000), 4u64),
1341        ];
1342
1343        let gm = GuestMemory::new(regions).unwrap();
1344        for &(addr, value) in writes {
1345            gm.write_obj_at_addr(value, addr).unwrap();
1346        }
1347
1348        let mut data = tempfile::tempfile().unwrap();
1349        // SAFETY:
1350        // no vm is running
1351        let metadata_json = unsafe { gm.snapshot(&mut data, false).unwrap() };
1352        let metadata: MemorySnapshotMetadata =
1353            AnySnapshot::from_any(metadata_json.clone()).unwrap();
1354
1355        #[cfg(unix)]
1356        assert_eq!(
1357            metadata,
1358            MemorySnapshotMetadata {
1359                regions: vec![
1360                    MemoryRegionSnapshotMetadata {
1361                        guest_base: 0,
1362                        size: 0x10000,
1363                        data_ranges: vec![0x0F000..0x10000],
1364                    },
1365                    MemoryRegionSnapshotMetadata {
1366                        guest_base: 0x10000,
1367                        size: 0x10000,
1368                        data_ranges: vec![0x00000..0x01000],
1369                    },
1370                    MemoryRegionSnapshotMetadata {
1371                        guest_base: 0x20000,
1372                        size: 0x10000,
1373                        data_ranges: vec![0x09000..0x0A000],
1374                    },
1375                    MemoryRegionSnapshotMetadata {
1376                        guest_base: 0x30000,
1377                        size: 0x10000,
1378                        data_ranges: vec![],
1379                    },
1380                    MemoryRegionSnapshotMetadata {
1381                        guest_base: 0x40000,
1382                        size: 0x1000,
1383                        data_ranges: vec![0x00000..0x01000],
1384                    }
1385                ],
1386                compressed: false,
1387            }
1388        );
1389        // We can't detect the holes on Windows yet.
1390        #[cfg(windows)]
1391        assert_eq!(
1392            metadata,
1393            MemorySnapshotMetadata {
1394                regions: vec![
1395                    MemoryRegionSnapshotMetadata {
1396                        guest_base: 0,
1397                        size: 0x10000,
1398                        data_ranges: vec![0x00000..0x10000],
1399                    },
1400                    MemoryRegionSnapshotMetadata {
1401                        guest_base: 0x10000,
1402                        size: 0x10000,
1403                        data_ranges: vec![0x00000..0x10000],
1404                    },
1405                    MemoryRegionSnapshotMetadata {
1406                        guest_base: 0x20000,
1407                        size: 0x10000,
1408                        data_ranges: vec![0x00000..0x10000],
1409                    },
1410                    MemoryRegionSnapshotMetadata {
1411                        guest_base: 0x30000,
1412                        size: 0x10000,
1413                        data_ranges: vec![0x00000..0x10000],
1414                    },
1415                    MemoryRegionSnapshotMetadata {
1416                        guest_base: 0x40000,
1417                        size: 0x1000,
1418                        data_ranges: vec![0x00000..0x01000],
1419                    }
1420                ],
1421                compressed: false,
1422            }
1423        );
1424
1425        std::mem::drop(gm);
1426
1427        let gm2 = GuestMemory::new(regions).unwrap();
1428
1429        // Write to a hole so we can assert the restore zeroes it.
1430        let hole_addr = GuestAddress(0x30000);
1431        gm2.write_obj_at_addr(8u64, hole_addr).unwrap();
1432
1433        use std::io::Seek;
1434        data.seek(std::io::SeekFrom::Start(0)).unwrap();
1435        // SAFETY:
1436        // no vm is running
1437        unsafe { gm2.restore(metadata_json, &mut data).unwrap() };
1438
1439        assert_eq!(gm2.read_obj_from_addr::<u64>(hole_addr).unwrap(), 0);
1440        for &(addr, value) in writes {
1441            assert_eq!(gm2.read_obj_from_addr::<u64>(addr).unwrap(), value);
1442        }
1443    }
1444}