use std::convert::AsRef;
use std::convert::TryFrom;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::marker::Send;
use std::marker::Sync;
use std::result;
use std::sync::Arc;

use anyhow::bail;
use anyhow::Context;
use base::pagesize;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Error as SysError;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MmapError;
use base::RawDescriptor;
use base::SharedMemory;
use base::VolatileMemory;
use base::VolatileMemoryError;
use base::VolatileSlice;
use cros_async::mem;
use cros_async::BackingMemory;
use remain::sorted;
use serde::Deserialize;
use serde::Serialize;
use serde_keyvalue::FromKeyValues;
use snapshot::AnySnapshot;
use thiserror::Error;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;

use crate::guest_address::GuestAddress;

mod sys;
pub use sys::MemoryPolicy;

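/// Errors that can arise while creating or accessing guest memory.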
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("failed to map guest memory to file: {0}")]
    FiledBackedMemoryMappingFailed(#[source] MmapError),
    #[error("failed to open file for file backed mapping: {0}")]
    FiledBackedOpenFailed(#[source] std::io::Error),
    #[error("invalid guest address {0}")]
    InvalidGuestAddress(GuestAddress),
    #[error("invalid guest range at {0} of size {1}")]
    InvalidGuestRange(GuestAddress, u64),
    #[error("invalid offset {0}")]
    InvalidOffset(u64),
    #[error("size {0} must not be zero")]
    InvalidSize(usize),
    #[error("invalid guest memory access at addr={0}: {1}")]
    MemoryAccess(GuestAddress, #[source] MmapError),
    #[error("failed to set seals on shm region: {0}")]
    MemoryAddSealsFailed(#[source] SysError),
    #[error("failed to create shm region: {0}")]
    MemoryCreationFailed(#[source] SysError),
    #[error("failed to map guest memory: {0}")]
    MemoryMappingFailed(#[source] MmapError),
    #[error("guest memory region {0}+{1:#x} is not page aligned")]
    MemoryNotAligned(GuestAddress, u64),
    #[error("memory regions overlap")]
    MemoryRegionOverlap,
    #[error("memory region size {0} is too large")]
    MemoryRegionTooLarge(u128),
    #[error("punch hole failed {0}")]
    PunchHole(#[source] base::Error),
    #[error("incomplete read of {completed} instead of {expected} bytes")]
    ShortRead { expected: usize, completed: usize },
    #[error("incomplete write of {completed} instead of {expected} bytes")]
    ShortWrite { expected: usize, completed: usize },
    #[error("DescriptorChain split is out of bounds: {0}")]
    SplitOutOfBounds(usize),
    #[error("{0}")]
    VolatileMemoryAccess(#[source] VolatileMemoryError),
}

pub type Result<T> = result::Result<T, Error>;

#[derive(Clone, Debug)]
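/// A file-like object backing guest memory: either an anonymous shared memory
/// segment or a regular file mapped into the guest.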
pub enum BackingObject {
    Shm(Arc<SharedMemory>),
    File(Arc<File>),
}

impl AsRawDescriptor for BackingObject {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        match self {
            Self::Shm(shm) => shm.as_raw_descriptor(),
            Self::File(f) => f.as_raw_descriptor(),
        }
    }
}

impl AsRef<dyn AsRawDescriptor + Sync + Send> for BackingObject {
    fn as_ref(&self) -> &(dyn AsRawDescriptor + Sync + Send + 'static) {
        match self {
            BackingObject::Shm(shm) => shm.as_ref(),
            BackingObject::File(f) => f.as_ref(),
        }
    }
}

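/// Information about a single memory region, as yielded by [`GuestMemory::regions`].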
pub struct MemoryRegionInformation<'a> {
    pub index: usize,
    pub guest_addr: GuestAddress,
    pub size: usize,
    pub host_addr: usize,
    pub shm: &'a BackingObject,
    pub shm_offset: u64,
    pub options: MemoryRegionOptions,
}

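/// The purpose a memory region serves in the guest's address space.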
#[sorted]
#[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Eq, Ord)]
pub enum MemoryRegionPurpose {
    Bios,

    #[default]
    GuestMemoryRegion,

    ProtectedFirmwareRegion,

    ReservedMemory,

    #[cfg(target_arch = "aarch64")]
    StaticSwiotlbRegion,
}

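/// User-supplied parameters describing a host file to map directly into the
/// guest physical address space.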
#[derive(Clone, Debug, Serialize, Deserialize, FromKeyValues, PartialEq, Eq, PartialOrd, Ord)]
#[serde(deny_unknown_fields)]
pub struct FileBackedMappingParameters {
    pub path: std::path::PathBuf,
    #[serde(rename = "addr")]
    pub address: u64,
    pub size: u64,
    #[serde(default)]
    pub offset: u64,
    #[serde(rename = "rw", default)]
    pub writable: bool,
    #[serde(default)]
    pub sync: bool,
    #[serde(default)]
    pub align: bool,
    #[serde(default)]
    pub ram: bool,
}

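/// Options that control how a single guest memory region is created.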
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct MemoryRegionOptions {
    pub purpose: MemoryRegionPurpose,
    pub align: u64,
    pub file_backed: Option<FileBackedMappingParameters>,
}

impl MemoryRegionOptions {
    pub fn new() -> MemoryRegionOptions {
        Default::default()
    }

    pub fn purpose(mut self, purpose: MemoryRegionPurpose) -> Self {
        self.purpose = purpose;
        self
    }

    pub fn align(mut self, alignment: u64) -> Self {
        self.align = alignment;
        self
    }

    pub fn file_backed(mut self, params: FileBackedMappingParameters) -> Self {
        self.file_backed = Some(params);
        self
    }
}

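/// A single contiguous region of guest physical memory together with its host
/// mapping and the object backing that mapping.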
#[derive(Debug)]
pub struct MemoryRegion {
    mapping: MemoryMapping,
    guest_base: GuestAddress,

    shared_obj: BackingObject,
    obj_offset: u64,

    options: MemoryRegionOptions,
}

impl MemoryRegion {
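    /// Creates a `MemoryRegion` mapped at `guest_base` and backed by `shm` at the given `offset`.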
    pub fn new_from_shm(
        size: u64,
        guest_base: GuestAddress,
        offset: u64,
        shm: Arc<SharedMemory>,
    ) -> Result<Self> {
        let mapping = MemoryMappingBuilder::new(size as usize)
            .from_shared_memory(shm.as_ref())
            .offset(offset)
            .build()
            .map_err(Error::MemoryMappingFailed)?;
        Ok(MemoryRegion {
            mapping,
            guest_base,
            shared_obj: BackingObject::Shm(shm),
            obj_offset: offset,
            options: Default::default(),
        })
    }

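    /// Creates a `MemoryRegion` mapped at `guest_base` and backed by `file` at the given `offset`.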
    pub fn new_from_file(
        size: u64,
        guest_base: GuestAddress,
        offset: u64,
        file: Arc<File>,
    ) -> Result<Self> {
        let mapping = MemoryMappingBuilder::new(size as usize)
            .from_file(&file)
            .offset(offset)
            .build()
            .map_err(Error::MemoryMappingFailed)?;
        Ok(MemoryRegion {
            mapping,
            guest_base,
            shared_obj: BackingObject::File(file),
            obj_offset: offset,
            options: Default::default(),
        })
    }

    fn start(&self) -> GuestAddress {
        self.guest_base
    }

    fn end(&self) -> GuestAddress {
        self.guest_base.unchecked_add(self.mapping.size() as u64)
    }

    fn contains(&self, addr: GuestAddress) -> bool {
        addr >= self.guest_base && addr < self.end()
    }

    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn contains_range(&self, addr: GuestAddress, size: u64) -> bool {
        let Some(end_addr) = addr.checked_add(size) else {
            return false;
        };
        addr >= self.guest_base && end_addr <= self.end()
    }
}

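/// The guest's physical memory, represented as a set of [`MemoryRegion`]s.
///
/// Cloning is cheap: all clones share the same underlying regions.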
#[derive(Clone, Debug)]
pub struct GuestMemory {
    regions: Arc<[MemoryRegion]>,
    locked: bool,
    use_punchhole_locked: bool,
}

impl AsRawDescriptors for GuestMemory {
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        self.regions
            .iter()
            .map(|r| r.shared_obj.as_raw_descriptor())
            .collect()
    }
}

impl GuestMemory {
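    /// Builds a single anonymous shared memory segment sized to cover every
    /// region in `ranges` that is not file backed.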
    fn create_shm(ranges: &[(GuestAddress, u64, MemoryRegionOptions)]) -> Result<SharedMemory> {
        let mut aligned_size = 0;
        let pg_size = pagesize();
        for range in ranges {
            if range.2.file_backed.is_some() {
                continue;
            }
            if range.1 % pg_size as u64 != 0 {
                return Err(Error::MemoryNotAligned(range.0, range.1));
            }

            aligned_size += range.1;
        }

        let name = "crosvm_guest";
        #[allow(unused_mut)]
        let mut shm = SharedMemory::new(name, aligned_size).map_err(Error::MemoryCreationFailed)?;

        sys::finalize_shm(&mut shm)?;

        Ok(shm)
    }

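    /// Creates guest memory from `(address, size, options)` triples.
    ///
    /// Regions without file backing are carved out of a single shared memory
    /// segment; file-backed regions map their file directly.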
    pub fn new_with_options(
        ranges: &[(GuestAddress, u64, MemoryRegionOptions)],
    ) -> Result<GuestMemory> {
        let shm = Arc::new(GuestMemory::create_shm(ranges)?);

        let mut regions = Vec::<MemoryRegion>::new();
        let mut shm_offset = 0;

        for range in ranges {
            if let Some(last) = regions.last() {
                if last
                    .guest_base
                    .checked_add(last.mapping.size() as u64)
                    .is_none_or(|a| a > range.0)
                {
                    return Err(Error::MemoryRegionOverlap);
                }
            }

            let size = usize::try_from(range.1)
                .map_err(|_| Error::MemoryRegionTooLarge(range.1 as u128))?;
            if let Some(file_backed) = &range.2.file_backed {
                assert_eq!(usize::try_from(file_backed.size).unwrap(), size);
                let file = file_backed.open().map_err(Error::FiledBackedOpenFailed)?;
                let mapping = MemoryMappingBuilder::new(size)
                    .from_file(&file)
                    .offset(file_backed.offset)
                    .align(range.2.align)
                    .protection(if file_backed.writable {
                        base::Protection::read_write()
                    } else {
                        base::Protection::read()
                    })
                    .build()
                    .map_err(Error::FiledBackedMemoryMappingFailed)?;
                regions.push(MemoryRegion {
                    mapping,
                    guest_base: range.0,
                    shared_obj: BackingObject::File(Arc::new(file)),
                    obj_offset: file_backed.offset,
                    options: range.2.clone(),
                });
            } else {
                let mapping = MemoryMappingBuilder::new(size)
                    .from_shared_memory(shm.as_ref())
                    .offset(shm_offset)
                    .align(range.2.align)
                    .build()
                    .map_err(Error::MemoryMappingFailed)?;
                regions.push(MemoryRegion {
                    mapping,
                    guest_base: range.0,
                    shared_obj: BackingObject::Shm(shm.clone()),
                    obj_offset: shm_offset,
                    options: range.2.clone(),
                });
                shm_offset += size as u64;
            }
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
            locked: false,
            use_punchhole_locked: false,
        })
    }

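    /// Creates guest memory from `(address, size)` pairs using default options
    /// for every region.
    ///
    /// A minimal usage sketch (regions must be page aligned and must not
    /// overlap):
    ///
    /// ```ignore
    /// let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)])?;
    /// ```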
    pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
        GuestMemory::new_with_options(
            ranges
                .iter()
                .map(|(addr, size)| (*addr, *size, Default::default()))
                .collect::<Vec<(GuestAddress, u64, MemoryRegionOptions)>>()
                .as_slice(),
        )
    }

    pub fn from_regions(mut regions: Vec<MemoryRegion>) -> Result<Self> {
        regions.sort_by(|a, b| a.guest_base.cmp(&b.guest_base));

        if regions.len() > 1 {
            let mut prev_end = regions[0]
                .guest_base
                .checked_add(regions[0].mapping.size() as u64)
                .ok_or(Error::MemoryRegionOverlap)?;
            for region in &regions[1..] {
                if prev_end > region.guest_base {
                    return Err(Error::MemoryRegionOverlap);
                }
                prev_end = region
                    .guest_base
                    .checked_add(region.mapping.size() as u64)
                    .ok_or(Error::MemoryRegionTooLarge(
                        region.guest_base.0 as u128 + region.mapping.size() as u128,
                    ))?;
            }
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
            locked: false,
            use_punchhole_locked: false,
        })
    }

    pub fn locked(&self) -> bool {
        self.locked
    }

    pub fn use_punchhole_locked(&self) -> bool {
        self.use_punchhole_locked
    }

    pub fn end_addr(&self) -> GuestAddress {
        self.regions
            .iter()
            .max_by_key(|region| region.start())
            .map_or(GuestAddress(0), MemoryRegion::end)
    }

    pub fn memory_size(&self) -> u64 {
        self.regions
            .iter()
            .map(|region| region.mapping.size() as u64)
            .sum()
    }

    pub fn address_in_range(&self, addr: GuestAddress) -> bool {
        self.regions.iter().any(|region| region.contains(addr))
    }

    pub fn range_overlap(&self, start: GuestAddress, end: GuestAddress) -> bool {
        self.regions
            .iter()
            .any(|region| region.start() < end && start < region.end())
    }

    pub fn checked_offset(&self, addr: GuestAddress, offset: u64) -> Option<GuestAddress> {
        addr.checked_add(offset).and_then(|a| {
            if self.address_in_range(a) {
                Some(a)
            } else {
                None
            }
        })
    }

    pub fn is_valid_range(&self, start: GuestAddress, length: u64) -> bool {
        if length == 0 {
            return false;
        }

        let end = if let Some(end) = start.checked_add(length - 1) {
            end
        } else {
            return false;
        };

        self.regions
            .iter()
            .any(|region| region.start() <= start && end < region.end())
    }

    pub fn num_regions(&self) -> u64 {
        self.regions.len() as u64
    }

    pub fn regions(&self) -> impl Iterator<Item = MemoryRegionInformation> {
        self.regions
            .iter()
            .enumerate()
            .map(|(index, region)| MemoryRegionInformation {
                index,
                guest_addr: region.start(),
                size: region.mapping.size(),
                host_addr: region.mapping.as_ptr() as usize,
                shm: &region.shared_obj,
                shm_offset: region.obj_offset,
                options: region.options.clone(),
            })
    }

    pub fn write_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_slice(buf, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    pub fn write_all_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.write_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortWrite {
                expected,
                completed,
            })
        }
    }

    pub fn read_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<usize> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_slice(buf, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    pub fn read_exact_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.read_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortRead {
                expected,
                completed,
            })
        }
    }

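    /// Reads an object of type `T` from guest memory at `guest_addr`.
    ///
    /// A minimal sketch of reading back a value the guest (or host) wrote:
    ///
    /// ```ignore
    /// let val: u64 = gm.read_obj_from_addr(GuestAddress(0x500))?;
    /// ```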
    pub fn read_obj_from_addr<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_obj(offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    pub fn read_obj_from_addr_volatile<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_obj_volatile(offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

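    /// Writes `val` into guest memory at `guest_addr`.
    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// gm.write_obj_at_addr(0x55u64, GuestAddress(0x500))?;
    /// ```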
    pub fn write_obj_at_addr<T: IntoBytes + Immutable>(
        &self,
        val: T,
        guest_addr: GuestAddress,
    ) -> Result<()> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_obj(val, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    pub fn write_obj_at_addr_volatile<T: IntoBytes + Immutable>(
        &self,
        val: T,
        guest_addr: GuestAddress,
    ) -> Result<()> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_obj_volatile(val, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    pub fn get_slice_at_addr(&self, addr: GuestAddress, len: usize) -> Result<VolatileSlice> {
        self.regions
            .iter()
            .find(|region| region.contains(addr))
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|region| {
                region
                    .mapping
                    .get_slice(addr.offset_from(region.start()) as usize, len)
                    .map_err(Error::VolatileMemoryAccess)
            })
    }

    pub fn get_host_address(&self, guest_addr: GuestAddress) -> Result<*const u8> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        Ok(unsafe { mapping.as_ptr().add(offset) } as *const u8)
    }

    pub fn get_host_address_range(
        &self,
        guest_addr: GuestAddress,
        size: usize,
    ) -> Result<*const u8> {
        if size == 0 {
            return Err(Error::InvalidSize(size));
        }

        let (mapping, offset, _) = self.find_region(guest_addr)?;

        if mapping.size().checked_sub(offset).is_none_or(|v| v < size) {
            return Err(Error::InvalidGuestAddress(guest_addr));
        }

        Ok(unsafe { mapping.as_ptr().add(offset) } as *const u8)
    }

    pub fn shm_region(
        &self,
        guest_addr: GuestAddress,
    ) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| region.shared_obj.as_ref())
    }

    pub fn offset_region(&self, offset: u64) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
        self.shm_region(
            self.checked_offset(self.regions[0].guest_base, offset)
                .ok_or(Error::InvalidOffset(offset))?,
        )
    }

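    /// Looks up the region containing `guest_addr` and returns its host mapping,
    /// the offset of `guest_addr` within that mapping, and the offset of the
    /// region within its backing object.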
    pub fn find_region(&self, guest_addr: GuestAddress) -> Result<(&MemoryMapping, usize, u64)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| {
                (
                    &region.mapping,
                    guest_addr.offset_from(region.start()) as usize,
                    region.obj_offset,
                )
            })
    }

    pub fn offset_from_base(
        &self,
        guest_addr: GuestAddress,
    ) -> Result<(&(dyn AsRawDescriptor + Send + Sync), u64)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| {
                (
                    region.shared_obj.as_ref(),
                    region.obj_offset + guest_addr.offset_from(region.start()),
                )
            })
    }

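    /// Writes a snapshot of guest memory to `w`, returning metadata needed by
    /// [`GuestMemory::restore`].
    ///
    /// # Safety
    ///
    /// Guest memory is read through raw slices, so the caller must ensure that
    /// nothing (vCPUs, devices) is concurrently modifying it while the snapshot
    /// is taken.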
    #[deny(unsafe_op_in_unsafe_fn)]
    pub unsafe fn snapshot<T: Write>(
        &self,
        w: &mut T,
        compress: bool,
    ) -> anyhow::Result<AnySnapshot> {
        fn go(
            this: &GuestMemory,
            w: &mut impl Write,
        ) -> anyhow::Result<Vec<MemoryRegionSnapshotMetadata>> {
            let mut regions = Vec::new();
            for region in this.regions.iter() {
                let data_ranges = region
                    .find_data_ranges()
                    .context("find_data_ranges failed")?;
                for range in &data_ranges {
                    let region_vslice = region
                        .mapping
                        .get_slice(range.start, range.end - range.start)?;
                    w.write_all(unsafe {
                        std::slice::from_raw_parts(region_vslice.as_ptr(), region_vslice.size())
                    })?;
                }
                regions.push(MemoryRegionSnapshotMetadata {
                    guest_base: region.guest_base.0,
                    size: region.mapping.size(),
                    data_ranges,
                });
            }
            Ok(regions)
        }

        let regions = if compress {
            let mut w = lz4_flex::frame::FrameEncoder::new(w);
            let regions = go(self, &mut w)?;
            w.finish()?;
            regions
        } else {
            go(self, w)?
        };

        AnySnapshot::to_any(MemorySnapshotMetadata {
            regions,
            compressed: compress,
        })
    }

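    /// Restores guest memory from a snapshot previously produced by
    /// [`GuestMemory::snapshot`].
    ///
    /// # Safety
    ///
    /// Guest memory is written through raw slices, so the caller must ensure
    /// that nothing else is accessing the regions being restored.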
    #[deny(unsafe_op_in_unsafe_fn)]
    pub unsafe fn restore<T: Read>(&self, metadata: AnySnapshot, r: &mut T) -> anyhow::Result<()> {
        let metadata: MemorySnapshotMetadata = AnySnapshot::from_any(metadata)?;

        let mut r: Box<dyn Read> = if metadata.compressed {
            Box::new(lz4_flex::frame::FrameDecoder::new(r))
        } else {
            Box::new(r)
        };

        if self.regions.len() != metadata.regions.len() {
            bail!(
                "snapshot expected {} memory regions but VM has {}",
                metadata.regions.len(),
                self.regions.len()
            );
        }
        for (region, metadata) in self.regions.iter().zip(metadata.regions.iter()) {
            let MemoryRegionSnapshotMetadata {
                guest_base,
                size,
                data_ranges,
            } = metadata;
            if region.guest_base.0 != *guest_base || region.mapping.size() != *size {
                bail!("snapshot memory regions don't match VM memory regions");
            }

            let mut prev_end = 0;
            for range in data_ranges {
                let hole_size = range
                    .start
                    .checked_sub(prev_end)
                    .context("invalid data range")?;
                if hole_size > 0 {
                    region.zero_range(prev_end, hole_size)?;
                }
                let region_vslice = region
                    .mapping
                    .get_slice(range.start, range.end - range.start)?;

                r.read_exact(unsafe {
                    std::slice::from_raw_parts_mut(region_vslice.as_mut_ptr(), region_vslice.size())
                })?;

                prev_end = range.end;
            }
            let hole_size = region
                .mapping
                .size()
                .checked_sub(prev_end)
                .context("invalid data range")?;
            if hole_size > 0 {
                region.zero_range(prev_end, hole_size)?;
            }
        }

        let mut buf = [0];
        if r.read(&mut buf)? != 0 {
            bail!("too many bytes");
        }

        Ok(())
    }
}

#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
struct MemorySnapshotMetadata {
    regions: Vec<MemoryRegionSnapshotMetadata>,
    compressed: bool,
}

#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
struct MemoryRegionSnapshotMetadata {
    guest_base: u64,
    size: usize,
    data_ranges: Vec<std::ops::Range<usize>>,
}

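// SAFETY: `GuestMemory` keeps every region mapped for as long as it is alive, so
// the volatile slices handed out below remain valid for the lifetime of `&self`.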
unsafe impl BackingMemory for GuestMemory {
    fn get_volatile_slice(
        &self,
        mem_range: cros_async::MemRegion,
    ) -> mem::Result<VolatileSlice<'_>> {
        self.get_slice_at_addr(GuestAddress(mem_range.offset), mem_range.len)
            .map_err(|_| mem::Error::InvalidOffset(mem_range.offset, mem_range.len))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_alignment() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);

        assert!(GuestMemory::new(&[(start_addr1, 0x100), (start_addr2, 0x400)]).is_err());
        assert!(GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).is_ok());
    }

    #[test]
    fn two_regions() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();

        assert!(gm.is_valid_range(GuestAddress(0x5000), 0x5000));
        assert!(gm.is_valid_range(GuestAddress(0x10000), 0x5000));
        assert!(!gm.is_valid_range(GuestAddress(0x5000), 0x10000));
    }

    #[test]
    fn overlap_memory() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        assert!(GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).is_err());
    }

    #[test]
    fn region_hole() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x40000);
        let gm = GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).unwrap();

        assert!(gm.address_in_range(GuestAddress(0x10000)));
        assert!(!gm.address_in_range(GuestAddress(0x30000)));
        assert!(gm.address_in_range(GuestAddress(0x50000)));
        assert!(!gm.address_in_range(GuestAddress(0x60000)));
        assert!(!gm.address_in_range(GuestAddress(0x60000)));
        assert!(gm.range_overlap(GuestAddress(0x10000), GuestAddress(0x30000)));
        assert!(!gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x40000)));
        assert!(gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x70000)));
        assert_eq!(gm.checked_offset(GuestAddress(0x10000), 0x10000), None);
        assert_eq!(
            gm.checked_offset(GuestAddress(0x50000), 0x8000),
            Some(GuestAddress(0x58000))
        );
        assert_eq!(gm.checked_offset(GuestAddress(0x50000), 0x10000), None);
        assert!(gm.is_valid_range(GuestAddress(0x0), 0x10000));
        assert!(gm.is_valid_range(GuestAddress(0x0), 0x20000));
        assert!(!gm.is_valid_range(GuestAddress(0x0), 0x20000 + 1));

        assert_eq!(
            gm.checked_offset(GuestAddress(0x10000), 0x40000),
            Some(GuestAddress(0x50000))
        );
        assert!(!gm.is_valid_range(GuestAddress(0x10000), 0x40000));
    }

    #[test]
    fn test_read_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x10000 + 32))
            .unwrap();
        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x10000 + 32)).unwrap();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_memory_size() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x10000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x20000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        let mem_size = gm.memory_size();
        assert_eq!(mem_size, size_region1 + size_region2);
    }

    fn get_mapping(mem: &GuestMemory, addr: GuestAddress) -> Result<*const u8> {
        Ok(mem.find_region(addr)?.0.as_ptr() as *const u8)
    }

    #[test]
    fn guest_to_host() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();

        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address(start_addr1).unwrap();
        let host_addr2 = mem.get_host_address(start_addr2).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address(bad_addr).is_err());
    }

    #[test]
    fn guest_to_host_range() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();

        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address_range(start_addr1, 0x10000).unwrap();
        let host_addr2 = mem.get_host_address_range(start_addr2, 0x10000).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        let host_addr3 = mem.get_host_address_range(start_addr2, 0x20000).unwrap();
        assert_eq!(host_addr3, addr2_base);

        assert!(mem.get_host_address_range(start_addr1, 0x20000).is_err());

        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address_range(bad_addr, 0x10000).is_err());
    }

    #[test]
    fn shm_offset() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x10000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x20000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        gm.write_obj_at_addr(0x1337u16, GuestAddress(0x0)).unwrap();
        gm.write_obj_at_addr(0x0420u16, GuestAddress(0x10000))
            .unwrap();

        for region in gm.regions() {
            let shm = match region.shm {
                BackingObject::Shm(s) => s,
                _ => {
                    panic!("backing object isn't SharedMemory");
                }
            };
            let mmap = MemoryMappingBuilder::new(region.size)
                .from_shared_memory(shm)
                .offset(region.shm_offset)
                .build()
                .unwrap();

            if region.index == 0 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);
            }

            if region.index == 1 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x0420u16);
            }
        }
    }

    #[test]
    #[cfg(target_arch = "x86_64")]
    fn snapshot_restore() {
        let regions = &[
            (GuestAddress(0x0), 0x10000),
            (GuestAddress(0x10000), 0x10000),
            (GuestAddress(0x20000), 0x10000),
            (GuestAddress(0x30000), 0x10000),
            (GuestAddress(0x40000), 0x1000),
        ];
        let writes = &[
            (GuestAddress(0x0FFF0), 1u64),
            (GuestAddress(0x10000), 2u64),
            (GuestAddress(0x29000), 3u64),
            (GuestAddress(0x40000), 4u64),
        ];

        let gm = GuestMemory::new(regions).unwrap();
        for &(addr, value) in writes {
            gm.write_obj_at_addr(value, addr).unwrap();
        }

        let mut data = tempfile::tempfile().unwrap();
        let metadata_json = unsafe { gm.snapshot(&mut data, false).unwrap() };
        let metadata: MemorySnapshotMetadata =
            AnySnapshot::from_any(metadata_json.clone()).unwrap();

        #[cfg(unix)]
        assert_eq!(
            metadata,
            MemorySnapshotMetadata {
                regions: vec![
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0,
                        size: 0x10000,
                        data_ranges: vec![0x0F000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x10000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x01000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x20000,
                        size: 0x10000,
                        data_ranges: vec![0x09000..0x0A000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x30000,
                        size: 0x10000,
                        data_ranges: vec![],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x40000,
                        size: 0x1000,
                        data_ranges: vec![0x00000..0x01000],
                    }
                ],
                compressed: false,
            }
        );
        #[cfg(windows)]
        assert_eq!(
            metadata,
            MemorySnapshotMetadata {
                regions: vec![
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x10000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x20000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x30000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x40000,
                        size: 0x1000,
                        data_ranges: vec![0x00000..0x01000],
                    }
                ],
                compressed: false,
            }
        );

        std::mem::drop(gm);

        let gm2 = GuestMemory::new(regions).unwrap();

        let hole_addr = GuestAddress(0x30000);
        gm2.write_obj_at_addr(8u64, hole_addr).unwrap();

        use std::io::Seek;
        data.seek(std::io::SeekFrom::Start(0)).unwrap();
        unsafe { gm2.restore(metadata_json, &mut data).unwrap() };

        assert_eq!(gm2.read_obj_from_addr::<u64>(hole_addr).unwrap(), 0);
        for &(addr, value) in writes {
            assert_eq!(gm2.read_obj_from_addr::<u64>(addr).unwrap(), value);
        }
    }
}