1pub(crate) mod gpu;
6
7use std::path::Path;
8use std::sync::LazyLock;
9use std::time::Duration;
10
11use base::error;
12use base::AsRawDescriptor;
13use base::Descriptor;
14use base::Error as SysError;
15use base::MemoryMappingArena;
16use base::MmapError;
17use base::Protection;
18use base::SafeDescriptor;
19use base::Tube;
20use base::UnixSeqpacket;
21use hypervisor::MemCacheType;
22use hypervisor::MemSlot;
23use hypervisor::Vm;
24use libc::EINVAL;
25use libc::ERANGE;
26use resources::Alloc;
27use resources::SystemAllocator;
28use serde::Deserialize;
29use serde::Serialize;
30use vm_memory::GuestAddress;
31
32use crate::client::HandleRequestResult;
33use crate::VmMappedMemoryRegion;
34use crate::VmRequest;
35use crate::VmResponse;
36
/// Connects to the crosvm control socket at `socket_path`, sends `request`,
/// and returns the response.
///
/// Convenience wrapper around [`handle_request_with_timeout`] with no receive
/// timeout, i.e. the receive blocks until a response (or error) arrives.
pub fn handle_request<T: AsRef<Path> + std::fmt::Debug>(
    request: &VmRequest,
    socket_path: T,
) -> HandleRequestResult {
    handle_request_with_timeout(request, socket_path, None)
}
43
44pub fn handle_request_with_timeout<T: AsRef<Path> + std::fmt::Debug>(
45 request: &VmRequest,
46 socket_path: T,
47 timeout: Option<Duration>,
48) -> HandleRequestResult {
49 match UnixSeqpacket::connect(&socket_path) {
50 Ok(s) => {
51 let socket = Tube::try_from(s).map_err(|_| ())?;
52 if timeout.is_some() {
53 if let Err(e) = socket.set_recv_timeout(timeout) {
54 error!(
55 "failed to set recv timeout on socket at '{:?}': {}",
56 socket_path, e
57 );
58 return Err(());
59 }
60 }
61 if let Err(e) = socket.send(request) {
62 error!(
63 "failed to send request to socket at '{:?}': {}",
64 socket_path, e
65 );
66 return Err(());
67 }
68 match socket.recv() {
69 Ok(response) => Ok(response),
70 Err(e) => {
71 error!(
72 "failed to recv response from socket at '{:?}': {}",
73 socket_path, e
74 );
75 Err(())
76 }
77 }
78 }
79 Err(e) => {
80 error!("failed to connect to socket at '{:?}': {}", socket_path, e);
81 Err(())
82 }
83 }
84}
85
/// Operations on the memory mapped at an existing VM memory slot.
/// Dispatched to the corresponding `Vm` method by [`VmMemoryMappingRequest::execute`].
#[derive(Serialize, Deserialize, Debug)]
pub enum VmMemoryMappingRequest {
    /// Flush the `size` bytes starting at `offset` within the arena at `slot`
    /// back to its backing store (`Vm::msync_memory_region`).
    MsyncArena {
        slot: MemSlot,
        offset: usize,
        size: usize,
    },

    /// Ask the kernel to page out the given byte range of the region at
    /// `slot` (`Vm::madvise_pageout_memory_region`).
    MadvisePageout {
        slot: MemSlot,
        offset: usize,
        size: usize,
    },

    /// Release the given byte range of the region at `slot`
    /// (`Vm::madvise_remove_memory_region`).
    MadviseRemove {
        slot: MemSlot,
        offset: usize,
        size: usize,
    },
}
114
/// Result of executing a [`VmMemoryMappingRequest`]: success, or the system
/// error reported by the underlying `Vm` call.
#[derive(Serialize, Deserialize, Debug)]
pub enum VmMemoryMappingResponse {
    Ok,
    Err(SysError),
}
120
121impl VmMemoryMappingRequest {
122 pub fn execute(&self, vm: &dyn Vm) -> VmMemoryMappingResponse {
131 use self::VmMemoryMappingRequest::*;
132 match *self {
133 MsyncArena { slot, offset, size } => match vm.msync_memory_region(slot, offset, size) {
134 Ok(()) => VmMemoryMappingResponse::Ok,
135 Err(e) => VmMemoryMappingResponse::Err(e),
136 },
137 MadvisePageout { slot, offset, size } => {
138 match vm.madvise_pageout_memory_region(slot, offset, size) {
139 Ok(()) => VmMemoryMappingResponse::Ok,
140 Err(e) => VmMemoryMappingResponse::Err(e),
141 }
142 }
143 MadviseRemove { slot, offset, size } => {
144 match vm.madvise_remove_memory_region(slot, offset, size) {
145 Ok(()) => VmMemoryMappingResponse::Ok,
146 Err(e) => VmMemoryMappingResponse::Err(e),
147 }
148 }
149 }
150 }
151}
152
/// Requests for creating and tearing down file-backed mappings inside a
/// VM shared memory region. Handled by [`FsMappingRequest::execute`].
#[derive(Serialize, Deserialize, Debug)]
pub enum FsMappingRequest {
    /// Register the address range described by `Alloc` (must be a
    /// `Alloc::PciBar`) with the VM as a new memory slot; responds with the
    /// slot number on success.
    AllocateSharedMemoryRegion(Alloc),
    /// Map part of a file into an existing memory slot
    /// (`Vm::add_fd_mapping`).
    CreateMemoryMapping {
        // Slot to map into, as returned by `AllocateSharedMemoryRegion`.
        slot: u32,
        // Descriptor of the file backing the mapping.
        fd: SafeDescriptor,
        // Length of the mapping in bytes.
        size: usize,
        // Offset into the file where the mapping starts.
        file_offset: u64,
        // Page protection to apply to the mapping.
        prot: Protection,
        // Byte offset within the memory slot where the mapping is placed.
        mem_offset: usize,
    },
    /// Remove the mapping covering `offset`/`size` within memory slot `slot`
    /// (`Vm::remove_mapping`).
    RemoveMemoryMapping {
        slot: u32,
        offset: usize,
        size: usize,
    },
}
184
185pub fn prepare_shared_memory_region(
186 vm: &dyn Vm,
187 allocator: &mut SystemAllocator,
188 alloc: Alloc,
189 cache: MemCacheType,
190) -> Result<VmMappedMemoryRegion, SysError> {
191 if !matches!(alloc, Alloc::PciBar { .. }) {
192 return Err(SysError::new(EINVAL));
193 }
194 match allocator.mmio_allocator_any().get(&alloc) {
195 Some((range, _)) => {
196 let size: usize = match range.len().and_then(|x| x.try_into().ok()) {
197 Some(v) => v,
198 None => return Err(SysError::new(ERANGE)),
199 };
200 let arena = match MemoryMappingArena::new(size) {
201 Ok(a) => a,
202 Err(MmapError::SystemCallFailed(e)) => return Err(e),
203 _ => return Err(SysError::new(EINVAL)),
204 };
205
206 match vm.add_memory_region(
207 GuestAddress(range.start),
208 Box::new(arena),
209 false,
210 false,
211 cache,
212 ) {
213 Ok(slot) => Ok(VmMappedMemoryRegion {
214 guest_address: GuestAddress(range.start),
215 slot,
216 }),
217 Err(e) => Err(e),
218 }
219 }
220 None => Err(SysError::new(EINVAL)),
221 }
222}
223
static SHOULD_PREPARE_MEMORY_REGION: LazyLock<bool> = LazyLock::new(|| {
    if cfg!(target_arch = "x86_64") {
        // On x86_64, consult the KVM tdp_mmu module parameter: prepare only
        // when it reads 'Y'. If the file is missing or empty, default to
        // false. NOTE(review): presumably region preparation is only
        // worthwhile with the TDP MMU enabled — confirm with KVM docs.
        std::fs::read("/sys/module/kvm/parameters/tdp_mmu")
            .ok()
            .and_then(|bytes| bytes.first().copied())
            .map_or(false, |first| first == b'Y')
    } else {
        // Elsewhere, prepare on any 64-bit target.
        cfg!(target_pointer_width = "64")
    }
});
241
/// Returns whether shared memory regions should be prepared up front on this
/// host (cached result of the `SHOULD_PREPARE_MEMORY_REGION` probe).
pub fn should_prepare_memory_region() -> bool {
    *SHOULD_PREPARE_MEMORY_REGION
}
245
246impl FsMappingRequest {
247 pub fn execute(&self, vm: &dyn Vm, allocator: &mut SystemAllocator) -> VmResponse {
248 use self::FsMappingRequest::*;
249 match *self {
250 AllocateSharedMemoryRegion(alloc) => {
251 match prepare_shared_memory_region(
252 vm,
253 allocator,
254 alloc,
255 MemCacheType::CacheCoherent,
256 ) {
257 Ok(VmMappedMemoryRegion { slot, .. }) => VmResponse::RegisterMemory { slot },
258 Err(e) => VmResponse::Err(e),
259 }
260 }
261 CreateMemoryMapping {
262 slot,
263 ref fd,
264 size,
265 file_offset,
266 prot,
267 mem_offset,
268 } => {
269 let raw_fd: Descriptor = Descriptor(fd.as_raw_descriptor());
270
271 match vm.add_fd_mapping(slot, mem_offset, size, &raw_fd, file_offset, prot) {
272 Ok(()) => VmResponse::Ok,
273 Err(e) => VmResponse::Err(e),
274 }
275 }
276 RemoveMemoryMapping { slot, offset, size } => {
277 match vm.remove_mapping(slot, offset, size) {
278 Ok(()) => VmResponse::Ok,
279 Err(e) => VmResponse::Err(e),
280 }
281 }
282 }
283 }
284}