1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
use base::MemfdSeals;
use base::MemoryMappingUnix;
use base::SharedMemory;
use base::SharedMemoryUnix;
use bitflags::bitflags;
use crate::Error;
use crate::GuestAddress;
use crate::GuestMemory;
use crate::Result;
bitflags! {
    /// Per-`GuestMemory` policy flags applied by [`GuestMemory::set_memory_policy`].
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct MemoryPolicy: u32 {
        /// Request transparent hugepages for guest memory mappings
        /// (see `use_hugepages` on the mapping).
        const USE_HUGEPAGES = 1;
        /// Lock (mlock) all guest memory mappings into RAM
        /// (see `lock_all` on the mapping).
        const LOCK_GUEST_MEMORY = (1 << 1);
    }
}
/// Seals the shared-memory backing so it can no longer be resized.
///
/// Applies shrink, grow, and seal seals to the memfd, then forbids adding
/// any further seals. Returns `Error::MemoryAddSealsFailed` if the kernel
/// rejects the seal operation.
pub(crate) fn finalize_shm(shm: &mut SharedMemory) -> Result<()> {
    let seals = {
        let mut s = MemfdSeals::new();
        s.set_shrink_seal();
        s.set_grow_seal();
        s.set_seal_seal();
        s
    };
    shm.add_seals(seals).map_err(Error::MemoryAddSealsFailed)
}
impl GuestMemory {
    /// Releases `count` bytes of backing starting at guest address `addr`,
    /// delegating to the containing region's `remove_range`.
    ///
    /// # Errors
    ///
    /// Fails if `addr` does not fall inside any region, or if the underlying
    /// mapping operation fails (reported as `Error::MemoryAccess`).
    pub fn remove_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
        let (mapping, offset, _) = self.find_region(addr)?;
        mapping
            // NOTE(review): `count as usize` silently truncates on 32-bit
            // targets; callers are expected to pass in-range values — confirm.
            .remove_range(offset, count as usize)
            .map_err(|e| Error::MemoryAccess(addr, e))
    }

    /// Applies the requested memory policies to every guest memory region.
    ///
    /// Policy application is best-effort: a failure on one region is reported
    /// to stderr and does not abort processing of the remaining regions.
    pub fn set_memory_policy(&self, mem_policy: MemoryPolicy) {
        if mem_policy.is_empty() {
            return;
        }
        for region in self.regions.iter() {
            if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
                if let Err(err) = region.mapping.use_hugepages() {
                    // Diagnostics belong on stderr, not stdout.
                    eprintln!("Failed to enable HUGEPAGE for mapping {}", err);
                }
            }
            if mem_policy.contains(MemoryPolicy::LOCK_GUEST_MEMORY) {
                if let Err(err) = region.mapping.lock_all() {
                    eprintln!("Failed to lock memory for mapping {}", err);
                }
            }
        }
    }

    /// Marks every region's mapping as "don't fork" (presumably
    /// `MADV_DONTFORK` — confirm in the `base` crate), stopping on the
    /// first failure.
    pub fn use_dontfork(&self) -> anyhow::Result<()> {
        for region in self.regions.iter() {
            region.mapping.use_dontfork()?;
        }
        Ok(())
    }
}