// vm_memory/guest_memory/sys/linux.rs

use base::linux::FileDataIterator;
use base::linux::MemfdSeals;
use base::linux::MemoryMappingUnix;
use base::linux::SharedMemoryLinux;
use base::MappedRegion;
use base::SharedMemory;
use bitflags::bitflags;

use crate::Error;
use crate::FileBackedMappingParameters;
use crate::GuestAddress;
use crate::GuestMemory;
use crate::MemoryRegion;
use crate::Result;
20bitflags! {
21 #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
22 #[repr(transparent)]
23 pub struct MemoryPolicy: u32 {
24 const USE_HUGEPAGES = 1;
25 const LOCK_GUEST_MEMORY = (1 << 1);
26 const USE_PUNCHHOLE_LOCKED = (1 << 2);
27 }
28}
29
30pub(crate) fn finalize_shm(shm: &mut SharedMemory) -> Result<()> {
31 let mut seals = MemfdSeals::new();
35
36 seals.set_shrink_seal();
37 seals.set_grow_seal();
38 seals.set_seal_seal();
39
40 shm.add_seals(seals).map_err(Error::MemoryAddSealsFailed)
41}
42
43impl GuestMemory {
44 pub fn remove_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
49 if self.use_punchhole_locked() {
52 let region = self
53 .regions
54 .iter()
55 .find(|region| region.contains_range(addr, count))
56 .ok_or(Error::InvalidGuestRange(addr, count))?;
57 base::sys::linux::fallocate(
58 ®ion.shared_obj,
59 base::sys::linux::FallocateMode::PunchHole,
60 region.obj_offset + addr.offset_from(region.guest_base),
61 count,
62 )
63 .map_err(Error::PunchHole)
64 } else {
65 let (mapping, offset, _) = self.find_region(addr)?;
66 mapping
67 .remove_range(offset, count as usize)
68 .map_err(|e| Error::MemoryAccess(addr, e))
69 }
70 }
71
72 pub fn set_memory_policy(&mut self, mem_policy: MemoryPolicy) {
74 if mem_policy.is_empty() {
75 return;
76 }
77
78 for region in self.regions.iter() {
79 if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
80 let ret = region.mapping.use_hugepages();
81
82 if let Err(err) = ret {
83 println!("Failed to enable HUGEPAGE for mapping {err}");
84 }
85 }
86
87 if mem_policy.contains(MemoryPolicy::LOCK_GUEST_MEMORY) {
88 self.locked = true;
89
90 let ret = region.mapping.lock_all();
96
97 if let Err(err) = ret {
98 println!("Failed to lock memory for mapping {err}");
99 }
100 }
101
102 if mem_policy.contains(MemoryPolicy::USE_PUNCHHOLE_LOCKED) {
103 self.use_punchhole_locked = true;
104 }
105 }
106 }
107
108 pub fn use_dontfork(&self) -> anyhow::Result<()> {
109 for region in self.regions.iter() {
110 region.mapping.use_dontfork()?;
111 }
112 Ok(())
113 }
114}
115
116impl FileBackedMappingParameters {
117 pub fn open(&self) -> std::io::Result<std::fs::File> {
118 use std::os::unix::fs::OpenOptionsExt;
119 Ok(base::open_file_or_duplicate(
120 &self.path,
121 std::fs::OpenOptions::new()
122 .read(true)
123 .write(self.writable)
124 .custom_flags(if self.sync { libc::O_SYNC } else { 0 }),
125 )?)
126 }
127}
128
129impl MemoryRegion {
130 pub(crate) fn find_data_ranges(&self) -> anyhow::Result<Vec<std::ops::Range<usize>>> {
136 FileDataIterator::new(
137 &self.shared_obj,
138 self.obj_offset,
139 u64::try_from(self.mapping.size()).unwrap(),
140 )
141 .map(|range| {
142 let range = range?;
143 Ok(usize::try_from(range.start - self.obj_offset).unwrap()
145 ..usize::try_from(range.end - self.obj_offset).unwrap())
146 })
147 .collect()
148 }
149
150 pub(crate) fn zero_range(&self, offset: usize, size: usize) -> anyhow::Result<()> {
151 self.mapping.remove_range(offset, size)?;
152 Ok(())
153 }
154}