// vm_memory/guest_memory/sys/linux.rs

use base::linux::FileDataIterator;
use base::linux::MemfdSeals;
use base::linux::MemoryMappingUnix;
use base::linux::SharedMemoryLinux;
use base::MappedRegion;
use base::SharedMemory;
use bitflags::bitflags;

use crate::Error;
use crate::FileBackedMappingParameters;
use crate::GuestAddress;
use crate::GuestMemory;
use crate::MemoryRegion;
use crate::Result;
bitflags! {
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct MemoryPolicy: u32 {
        const USE_HUGEPAGES = 1;
        const LOCK_GUEST_MEMORY = (1 << 1);
        const USE_PUNCHHOLE_LOCKED = (1 << 2);
    }
}
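// Illustrative sketch (not part of the module's API): `MemoryPolicy` values
// compose as ordinary bitflags, which is how a caller requests several
// policies in a single `set_memory_policy` call below.
#[cfg(test)]
mod memory_policy_example {
    use super::MemoryPolicy;

    #[test]
    fn policies_compose() {
        let policy = MemoryPolicy::USE_HUGEPAGES | MemoryPolicy::LOCK_GUEST_MEMORY;
        assert!(policy.contains(MemoryPolicy::USE_HUGEPAGES));
        assert!(!policy.contains(MemoryPolicy::USE_PUNCHHOLE_LOCKED));
        assert!(MemoryPolicy::empty().is_empty());
    }
}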
/// Seals the given shared memory object so that its size can no longer
/// change after guest memory has been mapped from it.
pub(crate) fn finalize_shm(shm: &mut SharedMemory) -> Result<()> {
    let mut seals = MemfdSeals::new();

    // Forbid shrinking, growing, and any further sealing of the memfd.
    seals.set_shrink_seal();
    seals.set_grow_seal();
    seals.set_seal_seal();

    shm.add_seals(seals).map_err(Error::MemoryAddSealsFailed)
}
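// Illustrative sketch: the three seals applied by `finalize_shm` correspond
// to the raw memfd seals F_SEAL_SHRINK, F_SEAL_GROW, and F_SEAL_SEAL. The
// test below shows the equivalent libc-level operations on a fresh memfd;
// it assumes only the `libc` crate, which this file already links against.
#[cfg(test)]
mod finalize_shm_example {
    #[test]
    fn seals_at_the_libc_level() {
        // SAFETY: raw syscalls on a file descriptor we own.
        unsafe {
            let fd = libc::memfd_create(b"demo\0".as_ptr().cast(), libc::MFD_ALLOW_SEALING);
            assert!(fd >= 0);
            let seals = libc::F_SEAL_SHRINK | libc::F_SEAL_GROW | libc::F_SEAL_SEAL;
            assert_eq!(libc::fcntl(fd, libc::F_ADD_SEALS, seals), 0);
            // With F_SEAL_GROW in place, growing the memfd must fail.
            assert_eq!(libc::ftruncate(fd, 4096), -1);
            libc::close(fd);
        }
    }
}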
impl GuestMemory {
    /// Madvises away the host address range associated with the given guest
    /// range, releasing the backing pages. (A usage sketch follows this impl
    /// block.)
    pub fn remove_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
        let (mapping, offset, _) = self.find_region(addr)?;
        mapping
            .remove_range(offset, count as usize)
            .map_err(|e| Error::MemoryAccess(addr, e))
    }

    /// Punches a hole in the backing object of the region containing the
    /// given guest range, deallocating the covered blocks. Fails with
    /// `Error::InvalidGuestRange` if the range does not fit in one region.
    pub fn punch_hole_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
        let region = self
            .regions
            .iter()
            .find(|region| region.contains_range(addr, count))
            .ok_or(Error::InvalidGuestRange(addr, count))?;
        base::sys::linux::fallocate(
            &region.shared_obj,
            base::sys::linux::FallocateMode::PunchHole,
            region.obj_offset + addr.offset_from(region.guest_base),
            count,
        )
        .map_err(Error::PunchHole)
    }

    /// Applies the given memory policy flags to every mapped region.
    /// Hugepage and mlock failures are logged rather than returned, since
    /// both policies are best-effort.
    pub fn set_memory_policy(&mut self, mem_policy: MemoryPolicy) {
        if mem_policy.is_empty() {
            return;
        }

        for region in self.regions.iter() {
            if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
                let ret = region.mapping.use_hugepages();

                if let Err(err) = ret {
                    println!("Failed to enable HUGEPAGE for mapping: {err}");
                }
            }

            if mem_policy.contains(MemoryPolicy::LOCK_GUEST_MEMORY) {
                self.locked = true;

                let ret = region.mapping.lock_all();

                if let Err(err) = ret {
                    println!("Failed to lock memory for mapping: {err}");
                }
            }

            if mem_policy.contains(MemoryPolicy::USE_PUNCHHOLE_LOCKED) {
                self.use_punchhole_locked = true;
            }
        }
    }

    /// Madvises every region with `MADV_DONTFORK` so guest memory mappings
    /// are not inherited by child processes.
    pub fn use_dontfork(&self) -> anyhow::Result<()> {
        for region in self.regions.iter() {
            region.mapping.use_dontfork()?;
        }
        Ok(())
    }
}
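// Usage sketch (illustrative, with assumptions): `GuestMemory::new` is
// assumed here to take `&[(GuestAddress, u64)]` and `base::pagesize()` to
// return the host page size, as elsewhere in this crate; adjust if the
// actual constructors differ. The second test demonstrates, with raw libc
// calls, the fallocate(PUNCH_HOLE) mechanism that `punch_hole_range` wraps.
#[cfg(test)]
mod guest_memory_examples {
    use super::*;

    #[test]
    fn remove_then_apply_policy() {
        let page = base::pagesize() as u64;
        // One 4-page region starting at guest physical address 0.
        let mut mem = GuestMemory::new(&[(GuestAddress(0), page * 4)]).unwrap();
        // Release the backing pages of the second page; later reads through
        // the mapping observe zeros.
        mem.remove_range(GuestAddress(page), page).unwrap();
        // Hugepage advice is best-effort; failures are only logged.
        mem.set_memory_policy(MemoryPolicy::USE_HUGEPAGES);
        mem.use_dontfork().unwrap();
    }

    #[test]
    fn punch_hole_at_the_libc_level() {
        // SAFETY: raw syscalls on a file descriptor we own.
        unsafe {
            let fd = libc::memfd_create(b"hole\0".as_ptr().cast(), 0);
            assert!(fd >= 0);
            assert_eq!(libc::ftruncate(fd, 8192), 0);
            // Equivalent of FallocateMode::PunchHole over bytes 0..4096.
            let ret = libc::fallocate(
                fd,
                libc::FALLOC_FL_PUNCH_HOLE | libc::FALLOC_FL_KEEP_SIZE,
                0,
                4096,
            );
            assert_eq!(ret, 0);
            libc::close(fd);
        }
    }
}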

impl FileBackedMappingParameters {
    /// Opens the backing file read-only or read-write per `self.writable`,
    /// adding `O_SYNC` when `self.sync` is set.
    pub fn open(&self) -> std::io::Result<std::fs::File> {
        use std::os::unix::fs::OpenOptionsExt;
        Ok(base::open_file_or_duplicate(
            &self.path,
            std::fs::OpenOptions::new()
                .read(true)
                .write(self.writable)
                .custom_flags(if self.sync { libc::O_SYNC } else { 0 }),
        )?)
    }
}

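// Illustrative sketch using plain std: the same OpenOptions pattern as
// `open` above, where O_SYNC makes each write reach stable storage before
// returning. The temp-file path here is purely for demonstration.
#[cfg(test)]
mod open_example {
    #[test]
    fn open_with_sync_flag() {
        use std::os::unix::fs::OpenOptionsExt;
        let path = std::env::temp_dir().join("file_backed_mapping_demo");
        let file = std::fs::OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .custom_flags(libc::O_SYNC)
            .open(&path)
            .unwrap();
        drop(file);
        let _ = std::fs::remove_file(&path);
    }
}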
impl MemoryRegion {
    /// Finds the ranges of the backing object that might contain non-zero
    /// data (i.e. are not file holes), returned as offsets relative to the
    /// start of the region. (A libc-level sketch follows this impl block.)
    pub(crate) fn find_data_ranges(&self) -> anyhow::Result<Vec<std::ops::Range<usize>>> {
        FileDataIterator::new(
            &self.shared_obj,
            self.obj_offset,
            u64::try_from(self.mapping.size()).unwrap(),
        )
        .map(|range| {
            let range = range?;
            // Convert absolute file offsets into region-relative offsets.
            Ok(usize::try_from(range.start - self.obj_offset).unwrap()
                ..usize::try_from(range.end - self.obj_offset).unwrap())
        })
        .collect()
    }

    /// Zeroes `size` bytes at `offset` within the region by releasing the
    /// backing pages; subsequent reads through the mapping return zeros.
    pub(crate) fn zero_range(&self, offset: usize, size: usize) -> anyhow::Result<()> {
        self.mapping.remove_range(offset, size)?;
        Ok(())
    }
}
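// Illustrative sketch: `FileDataIterator` reports the allocated (data)
// extents of the backing file, presumably via lseek(SEEK_DATA)/SEEK_HOLE;
// the test below demonstrates that mechanism directly with libc on a memfd
// (tmpfs), which tracks holes. This is an assumption about base's
// implementation, not a statement of it.
#[cfg(test)]
mod data_range_examples {
    #[test]
    fn seek_data_skips_holes() {
        // SAFETY: raw syscalls on a file descriptor we own.
        unsafe {
            let fd = libc::memfd_create(b"extents\0".as_ptr().cast(), 0);
            assert!(fd >= 0);
            assert_eq!(libc::ftruncate(fd, 16384), 0);
            // Populate one page in the middle; everything else stays a hole.
            let buf = [1u8; 4096];
            assert_eq!(
                libc::pwrite(fd, buf.as_ptr().cast(), buf.len(), 4096),
                4096
            );
            // The first data extent begins at the written page.
            assert_eq!(libc::lseek(fd, 0, libc::SEEK_DATA), 4096);
            libc::close(fd);
        }
    }
}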