vm_control/sys/
linux.rs

1// Copyright 2022 The ChromiumOS Authors
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5pub(crate) mod gpu;
6
7use std::path::Path;
8use std::sync::LazyLock;
9use std::time::Duration;
10
11use base::error;
12use base::AsRawDescriptor;
13use base::Descriptor;
14use base::Error as SysError;
15use base::MemoryMappingArena;
16use base::MmapError;
17use base::Protection;
18use base::SafeDescriptor;
19use base::Tube;
20use base::UnixSeqpacket;
21use hypervisor::MemCacheType;
22use hypervisor::MemSlot;
23use hypervisor::Vm;
24use libc::EINVAL;
25use libc::ERANGE;
26use resources::Alloc;
27use resources::SystemAllocator;
28use serde::Deserialize;
29use serde::Serialize;
30use vm_memory::GuestAddress;
31
32use crate::client::HandleRequestResult;
33use crate::VmMappedMemoryRegion;
34use crate::VmRequest;
35use crate::VmResponse;
36
/// Sends `request` to the control socket at `socket_path` and returns the response.
///
/// Blocks with no receive timeout; shorthand for `handle_request_with_timeout` with
/// `timeout: None`.
pub fn handle_request<T: AsRef<Path> + std::fmt::Debug>(
    request: &VmRequest,
    socket_path: T,
) -> HandleRequestResult {
    handle_request_with_timeout(request, socket_path, None)
}
43
44pub fn handle_request_with_timeout<T: AsRef<Path> + std::fmt::Debug>(
45    request: &VmRequest,
46    socket_path: T,
47    timeout: Option<Duration>,
48) -> HandleRequestResult {
49    match UnixSeqpacket::connect(&socket_path) {
50        Ok(s) => {
51            let socket = Tube::try_from(s).map_err(|_| ())?;
52            if timeout.is_some() {
53                if let Err(e) = socket.set_recv_timeout(timeout) {
54                    error!(
55                        "failed to set recv timeout on socket at '{:?}': {}",
56                        socket_path, e
57                    );
58                    return Err(());
59                }
60            }
61            if let Err(e) = socket.send(request) {
62                error!(
63                    "failed to send request to socket at '{:?}': {}",
64                    socket_path, e
65                );
66                return Err(());
67            }
68            match socket.recv() {
69                Ok(response) => Ok(response),
70                Err(e) => {
71                    error!(
72                        "failed to recv response from socket at '{:?}': {}",
73                        socket_path, e
74                    );
75                    Err(())
76                }
77            }
78        }
79        Err(e) => {
80            error!("failed to connect to socket at '{:?}': {}", socket_path, e);
81            Err(())
82        }
83    }
84}
85
/// Requests that operate on a memory mapping previously registered with the `Vm`,
/// identified by its `MemSlot`.
#[derive(Serialize, Deserialize, Debug)]
pub enum VmMemoryMappingRequest {
    /// Flush the content of a memory mapping to its backing file.
    /// `slot` selects the arena (as returned by `Vm::add_mmap_arena`).
    /// `offset` is the offset of the mapping to sync within the arena.
    /// `size` is the size of the mapping to sync within the arena.
    MsyncArena {
        slot: MemSlot,
        offset: usize,
        size: usize,
    },

    /// Gives a MADV_PAGEOUT advice to the memory region mapped at `slot`, with the address range
    /// starting at `offset` from the start of the region, and with size `size`.
    MadvisePageout {
        slot: MemSlot,
        offset: usize,
        size: usize,
    },

    /// Gives a MADV_REMOVE advice to the memory region mapped at `slot`, with the address range
    /// starting at `offset` from the start of the region, and with size `size`.
    MadviseRemove {
        slot: MemSlot,
        offset: usize,
        size: usize,
    },
}
114
/// Outcome of a `VmMemoryMappingRequest`, sent back to the requester.
#[derive(Serialize, Deserialize, Debug)]
pub enum VmMemoryMappingResponse {
    /// The request completed successfully.
    Ok,
    /// The request failed with the contained system error.
    Err(SysError),
}
120
121impl VmMemoryMappingRequest {
122    /// Executes this request on the given Vm.
123    ///
124    /// # Arguments
125    /// * `vm` - The `Vm` to perform the request on.
126    ///
127    /// This does not return a result, instead encapsulating the success or failure in a
128    /// `VmMsyncResponse` with the intended purpose of sending the response back over the socket
129    /// that received this `VmMsyncResponse`.
130    pub fn execute(&self, vm: &dyn Vm) -> VmMemoryMappingResponse {
131        use self::VmMemoryMappingRequest::*;
132        match *self {
133            MsyncArena { slot, offset, size } => match vm.msync_memory_region(slot, offset, size) {
134                Ok(()) => VmMemoryMappingResponse::Ok,
135                Err(e) => VmMemoryMappingResponse::Err(e),
136            },
137            MadvisePageout { slot, offset, size } => {
138                match vm.madvise_pageout_memory_region(slot, offset, size) {
139                    Ok(()) => VmMemoryMappingResponse::Ok,
140                    Err(e) => VmMemoryMappingResponse::Err(e),
141                }
142            }
143            MadviseRemove { slot, offset, size } => {
144                match vm.madvise_remove_memory_region(slot, offset, size) {
145                    Ok(()) => VmMemoryMappingResponse::Ok,
146                    Err(e) => VmMemoryMappingResponse::Err(e),
147                }
148            }
149        }
150    }
151}
152
/// Requests for creating and maintaining file-backed mappings inside a shared memory
/// region allocated from the MMIO address space (see `prepare_shared_memory_region`).
#[derive(Serialize, Deserialize, Debug)]
pub enum FsMappingRequest {
    /// Create an anonymous memory mapping that spans the entire region described by `Alloc`.
    AllocateSharedMemoryRegion(Alloc),
    /// Create a memory mapping.
    CreateMemoryMapping {
        /// The slot for a MemoryMappingArena, previously returned by a response to an
        /// `AllocateSharedMemoryRegion` request.
        slot: u32,
        /// The file descriptor that should be mapped.
        fd: SafeDescriptor,
        /// The size of the mapping.
        size: usize,
        /// The offset into the file from where the mapping should start.
        file_offset: u64,
        /// The memory protection to be used for the mapping.  Protections other than readable and
        /// writable will be silently dropped.
        prot: Protection,
        /// The offset into the shared memory region where the mapping should be placed.
        mem_offset: usize,
    },
    /// Remove a memory mapping.
    RemoveMemoryMapping {
        /// The slot for a MemoryMappingArena.
        slot: u32,
        /// The offset into the shared memory region.
        offset: usize,
        /// The size of the mapping.
        size: usize,
    },
}
184
185pub fn prepare_shared_memory_region(
186    vm: &dyn Vm,
187    allocator: &mut SystemAllocator,
188    alloc: Alloc,
189    cache: MemCacheType,
190) -> Result<VmMappedMemoryRegion, SysError> {
191    if !matches!(alloc, Alloc::PciBar { .. }) {
192        return Err(SysError::new(EINVAL));
193    }
194    match allocator.mmio_allocator_any().get(&alloc) {
195        Some((range, _)) => {
196            let size: usize = match range.len().and_then(|x| x.try_into().ok()) {
197                Some(v) => v,
198                None => return Err(SysError::new(ERANGE)),
199            };
200            let arena = match MemoryMappingArena::new(size) {
201                Ok(a) => a,
202                Err(MmapError::SystemCallFailed(e)) => return Err(e),
203                _ => return Err(SysError::new(EINVAL)),
204            };
205
206            match vm.add_memory_region(
207                GuestAddress(range.start),
208                Box::new(arena),
209                false,
210                false,
211                cache,
212            ) {
213                Ok(slot) => Ok(VmMappedMemoryRegion {
214                    guest_address: GuestAddress(range.start),
215                    slot,
216                }),
217                Err(e) => Err(e),
218            }
219        }
220        None => Err(SysError::new(EINVAL)),
221    }
222}
223
/// Lazily computed policy deciding whether a whole shared memory region should be mapped
/// up front; read through `should_prepare_memory_region()`.
static SHOULD_PREPARE_MEMORY_REGION: LazyLock<bool> = LazyLock::new(|| {
    if cfg!(target_arch = "x86_64") {
        // The legacy x86 MMU allocates an rmap and a page tracking array
        // that take 2.5MiB per 1GiB of user memory region address space,
        // so avoid mapping the whole shared memory region if we're not
        // using the tdp mmu.
        std::fs::read("/sys/module/kvm/parameters/tdp_mmu")
            .ok()
            .and_then(|bytes| bytes.first().copied())
            == Some(b'Y')
    } else {
        // Otherwise only prepare the region when a 64-bit address space is available;
        // there is not enough address space on 32-bit systems.
        cfg!(target_pointer_width = "64")
    }
});
241
/// Returns whether shared memory regions should be mapped up front on this system
/// (the cached result of the `SHOULD_PREPARE_MEMORY_REGION` probe).
pub fn should_prepare_memory_region() -> bool {
    *SHOULD_PREPARE_MEMORY_REGION
}
245
246impl FsMappingRequest {
247    pub fn execute(&self, vm: &dyn Vm, allocator: &mut SystemAllocator) -> VmResponse {
248        use self::FsMappingRequest::*;
249        match *self {
250            AllocateSharedMemoryRegion(alloc) => {
251                match prepare_shared_memory_region(
252                    vm,
253                    allocator,
254                    alloc,
255                    MemCacheType::CacheCoherent,
256                ) {
257                    Ok(VmMappedMemoryRegion { slot, .. }) => VmResponse::RegisterMemory { slot },
258                    Err(e) => VmResponse::Err(e),
259                }
260            }
261            CreateMemoryMapping {
262                slot,
263                ref fd,
264                size,
265                file_offset,
266                prot,
267                mem_offset,
268            } => {
269                let raw_fd: Descriptor = Descriptor(fd.as_raw_descriptor());
270
271                match vm.add_fd_mapping(slot, mem_offset, size, &raw_fd, file_offset, prot) {
272                    Ok(()) => VmResponse::Ok,
273                    Err(e) => VmResponse::Err(e),
274                }
275            }
276            RemoveMemoryMapping { slot, offset, size } => {
277                match vm.remove_mapping(slot, offset, size) {
278                    Ok(()) => VmResponse::Ok,
279                    Err(e) => VmResponse::Err(e),
280                }
281            }
282        }
283    }
284}