// hypervisor/lib.rs
1// Copyright 2020 The ChromiumOS Authors
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! A crate for abstracting the underlying kernel hypervisor used in crosvm.
6
7#[cfg(target_arch = "aarch64")]
8pub mod aarch64;
9pub mod caps;
10#[cfg(all(unix, target_arch = "aarch64", feature = "geniezone"))]
11pub mod geniezone;
12#[cfg(all(unix, target_arch = "aarch64", feature = "gunyah"))]
13pub mod gunyah;
14#[cfg(target_arch = "aarch64")]
15#[cfg(all(unix, target_arch = "aarch64", feature = "halla"))]
16pub mod halla;
17#[cfg(all(windows, feature = "haxm"))]
18pub mod haxm;
19#[cfg(any(target_os = "android", target_os = "linux"))]
20pub mod kvm;
21#[cfg(target_arch = "riscv64")]
22pub mod riscv64;
23#[cfg(all(windows, feature = "whpx"))]
24pub mod whpx;
25#[cfg(target_arch = "x86_64")]
26pub mod x86_64;
27
28use base::AsRawDescriptor;
29use base::Event;
30use base::MappedRegion;
31use base::Protection;
32use base::Result;
33use base::SafeDescriptor;
34use serde::Deserialize;
35use serde::Serialize;
36use vm_memory::GuestAddress;
37use vm_memory::GuestMemory;
38
39#[cfg(target_arch = "aarch64")]
40pub use crate::aarch64::*;
41pub use crate::caps::*;
42#[cfg(target_arch = "riscv64")]
43pub use crate::riscv64::*;
44#[cfg(target_arch = "x86_64")]
45pub use crate::x86_64::*;
46
// Re-export the current target's hypervisor types under generic `*Arch` aliases so
// architecture-independent code can refer to them without per-arch `cfg` blocks.
//
// NOTE(review): aarch64 and riscv64 alias the generic `Hypervisor` trait as `HypervisorArch`,
// while x86_64 aliases a dedicated `HypervisorX86_64` trait — confirm this asymmetry is intended.
cfg_if::cfg_if! {
    if #[cfg(target_arch = "aarch64")] {
        pub use CpuConfigAArch64 as CpuConfigArch;
        pub use Hypervisor as HypervisorArch;
        pub use VcpuAArch64 as VcpuArch;
        pub use VcpuInitAArch64 as VcpuInitArch;
        pub use VmAArch64 as VmArch;
    } else if #[cfg(target_arch = "riscv64")] {
        pub use CpuConfigRiscv64 as CpuConfigArch;
        pub use Hypervisor as HypervisorArch;
        pub use VcpuInitRiscv64 as VcpuInitArch;
        pub use VcpuRiscv64 as VcpuArch;
        pub use VmRiscv64 as VmArch;
    } else if #[cfg(target_arch = "x86_64")] {
        pub use CpuConfigX86_64 as CpuConfigArch;
        pub use HypervisorX86_64 as HypervisorArch;
        pub use VcpuInitX86_64 as VcpuInitArch;
        pub use VcpuX86_64 as VcpuArch;
        pub use VmX86_64 as VmArch;
    }
}
68
/// An index in the list of guest-mapped memory regions.
pub type MemSlot = u32;

/// Range of GPA space. Starting from `guest_address` up to `size`.
pub struct MemRegion {
    /// Guest physical address at which the region starts.
    pub guest_address: GuestAddress,
    /// Length of the region in bytes.
    pub size: u64,
}
77
/// Signal to the hypervisor on kernels that support the KVM_CAP_USER_CONFIGURE_NONCOHERENT_DMA (or
/// equivalent) that during user memory region (memslot) configuration, a guest page's memtype
/// should be considered in SLAT effective memtype determination rather than implicitly respecting
/// only the host page's memtype.
///
/// This explicit control is needed for Virtio devices (e.g. gpu) that configure memslots for host
/// WB page mappings with guest WC page mappings. See b/316337317, b/360295883 for more detail.
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum MemCacheType {
    /// Don't provide any explicit instruction to the hypervisor on how it should determine a
    /// memslot's effective memtype.
    ///
    /// On KVM-VMX (Intel), this means that the memslot is flagged with VMX_EPT_IPAT_BIT such that
    /// only the host memtype is respected.
    CacheCoherent,
    /// Explicitly instruct the hypervisor to respect the guest page's memtype when determining the
    /// memslot's effective memtype.
    ///
    /// On KVM-VMX (Intel), this means the memslot is NOT flagged with VMX_EPT_IPAT_BIT, and the
    /// effective memtype will generally decay to the weaker amongst the host/guest memtypes and
    /// the MTRR for the physical address.
    CacheNonCoherent,
}
101
/// This is intended for use with virtio-balloon, where a guest driver determines unused ranges and
/// requests they be freed. Use without the guest's knowledge is sure to break something.
///
/// Passed to `Vm::handle_balloon_event`.
pub enum BalloonEvent {
    /// Balloon event when the region is acquired from the guest. The guest cannot access this
    /// region any more. The guest memory can be reclaimed by the host OS. As per virtio-balloon
    /// spec, the given address and size are intended to be page-aligned.
    Inflate(MemRegion),
    /// Balloon event when the region is returned to the guest. VMM should reallocate memory and
    /// register it with the hypervisor for accesses by the guest.
    Deflate(MemRegion),
    /// Balloon event when the requested memory size is achieved. This can be achieved through
    /// either inflation or deflation. The `u64` will be the current size of the balloon in bytes.
    BalloonTargetReached(u64),
}
116
/// Supported hypervisors.
///
/// When adding a new one, also update the HypervisorFfi in crosvm_control/src/lib.rs
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum HypervisorKind {
    /// GenieZone hypervisor (`geniezone` module; unix aarch64 builds with the "geniezone" feature).
    Geniezone,
    /// Gunyah hypervisor (`gunyah` module; unix aarch64 builds with the "gunyah" feature).
    Gunyah,
    /// Halla hypervisor (`halla` module; unix aarch64 builds with the "halla" feature).
    Halla,
    /// Linux Kernel-based Virtual Machine (`kvm` module; Android/Linux builds).
    Kvm,
    /// Intel HAXM (`haxm` module; Windows builds with the "haxm" feature).
    Haxm,
    /// Windows Hypervisor Platform (`whpx` module; Windows builds with the "whpx" feature).
    Whpx,
}
129
/// A trait for checking hypervisor capabilities.
pub trait Hypervisor: Send {
    /// Makes a shallow clone of this `Hypervisor`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Checks if a particular `HypervisorCap` is available.
    ///
    /// Prefer `Vm::check_capability` when a `Vm` instance exists, since some capabilities are
    /// per-VM (see that method's documentation).
    fn check_capability(&self, cap: HypervisorCap) -> bool;
}
140
/// A wrapper for using a VM and getting/setting its state.
pub trait Vm: Send + Sync {
    /// Makes a shallow clone of the fd of this `Vm`.
    fn try_clone_descriptor(&self) -> Result<SafeDescriptor>;

    /// Returns hypervisor managing this `Vm`.
    fn hypervisor_kind(&self) -> HypervisorKind;

    /// Checks if a particular `VmCap` is available.
    ///
    /// This is distinct from the `Hypervisor` version of this method because some extensions depend
    /// on the particular `Vm` instance. This method is encouraged because it more accurately
    /// reflects the usable capabilities.
    fn check_capability(&self, c: VmCap) -> bool;

    /// Enable the VM capabilities.
    ///
    /// The default implementation returns an `Unsupported` error; backends that support enabling
    /// capabilities at the VM level override it.
    fn enable_capability(&self, _capability: VmCap, _flags: u32) -> Result<bool> {
        Err(std::io::Error::from(std::io::ErrorKind::Unsupported).into())
    }

    /// Get the guest physical address size in bits.
    fn get_guest_phys_addr_bits(&self) -> u8;

    /// Gets the guest-mapped memory for the Vm.
    fn get_memory(&self) -> &GuestMemory;

    /// Inserts the given `MappedRegion` into the VM's address space at `guest_addr`.
    ///
    /// The slot that was assigned the memory mapping is returned on success. The slot can be given
    /// to `Vm::remove_memory_region` to remove the memory from the VM's address space and take back
    /// ownership of `mem_region`.
    ///
    /// Note that memory inserted into the VM's address space must not overlap with any other memory
    /// slot's region.
    ///
    /// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to
    /// write will trigger a mmio VM exit, leaving the memory untouched.
    ///
    /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
    /// by the guest with `get_dirty_log`.
    ///
    /// `cache` can be used to set guest mem cache attribute if supported. Default is cache coherent
    /// memory. Noncoherent memory means this memory might not be coherent from all access points,
    /// e.g this could be the case when host GPU doesn't set the memory to be coherent with CPU
    /// access. Setting this attribute would allow hypervisor to adjust guest mem control to ensure
    /// synchronized guest access in noncoherent DMA case.
    fn add_memory_region(
        &self,
        guest_addr: GuestAddress,
        mem_region: Box<dyn MappedRegion>,
        read_only: bool,
        log_dirty_pages: bool,
        cache: MemCacheType,
    ) -> Result<MemSlot>;

    /// Does a synchronous msync of the memory mapped at `slot`, syncing `size` bytes starting at
    /// `offset` from the start of the region. `offset` must be page aligned.
    fn msync_memory_region(&self, slot: MemSlot, offset: usize, size: usize) -> Result<()>;

    /// Gives a MADV_PAGEOUT advice to the memory region mapped at `slot`, with the address range
    /// starting at `offset` from the start of the region, and with size `size`. `offset`
    /// must be page aligned.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn madvise_pageout_memory_region(
        &self,
        slot: MemSlot,
        offset: usize,
        size: usize,
    ) -> Result<()>;

    /// Gives a MADV_REMOVE advice to the memory region mapped at `slot`, with the address range
    /// starting at `offset` from the start of the region, and with size `size`. `offset`
    /// must be page aligned.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn madvise_remove_memory_region(&self, slot: MemSlot, offset: usize, size: usize)
        -> Result<()>;

    /// Removes and drops the `UserMemoryRegion` that was previously added at the given slot.
    fn remove_memory_region(&self, slot: MemSlot) -> Result<Box<dyn MappedRegion>>;

    /// Creates an emulated device.
    fn create_device(&self, kind: DeviceKind) -> Result<SafeDescriptor>;

    /// Gets the bitmap of dirty pages since the last call to `get_dirty_log` for the memory at
    /// `slot`. Only works on VMs that support `VmCap::DirtyLog`.
    ///
    /// The size of `dirty_log` must be at least as many bits as there are pages in the memory
    /// region `slot` represents. For example, if the size of `slot` is 16 pages, `dirty_log` must
    /// be 2 bytes or greater.
    fn get_dirty_log(&self, slot: MemSlot, dirty_log: &mut [u8]) -> Result<()>;

    /// Registers an event to be signaled whenever a certain address is written to.
    ///
    /// The `datamatch` parameter can be used to limit signaling `evt` to only the cases where the
    /// value being written is equal to `datamatch`. Note that the size of `datamatch` is important
    /// and must match the expected size of the guest's write.
    ///
    /// In all cases where `evt` is signaled, the ordinary vmexit to userspace that would be
    /// triggered is prevented.
    fn register_ioevent(
        &self,
        evt: Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()>;

    /// Unregisters an event previously registered with `register_ioevent`.
    ///
    /// The `evt`, `addr`, and `datamatch` set must be the same as the ones passed into
    /// `register_ioevent`.
    fn unregister_ioevent(
        &self,
        evt: Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()>;

    /// Trigger any matching registered io events based on an MMIO or PIO write at `addr`. The
    /// `data` slice represents the contents and length of the write, which is used to compare with
    /// the registered io events' Datamatch values. If the hypervisor does in-kernel IO event
    /// delivery, this is a no-op.
    fn handle_io_events(&self, addr: IoEventAddress, data: &[u8]) -> Result<()>;

    /// Retrieves the current timestamp of the paravirtual clock as seen by the current guest.
    /// Only works on VMs that support `VmCap::PvClock`.
    fn get_pvclock(&self) -> Result<ClockState>;

    /// Sets the current timestamp of the paravirtual clock as seen by the current guest.
    /// Only works on VMs that support `VmCap::PvClock`.
    fn set_pvclock(&self, state: &ClockState) -> Result<()>;

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the arena with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn add_fd_mapping(
        &self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()>;

    /// Remove `size`-byte mapping starting at `offset`.
    fn remove_mapping(&self, slot: u32, offset: usize, size: usize) -> Result<()>;

    /// Events from virtio-balloon that affect the state for guest memory and host memory.
    fn handle_balloon_event(&self, event: BalloonEvent) -> Result<()>;

    /// Registers with the hypervisor for CrosVM to handle any guest hypercall in the range.
    fn enable_hypercalls(&self, nr: u64, count: usize) -> Result<()>;

    /// Registers with the hypervisor for CrosVM to handle the guest hypercall.
    ///
    /// Convenience wrapper around `enable_hypercalls` for a single hypercall number.
    fn enable_hypercall(&self, nr: u64) -> Result<()> {
        self.enable_hypercalls(nr, 1)
    }
}
306
/// Operation for Io and Mmio
///
/// Borrowed view of the buffer for a single guest I/O access; carried by `IoParams`.
#[derive(Debug)]
pub enum IoOperation<'a> {
    /// Data to be read from a device on the bus.
    ///
    /// The `handle_fn` should fill the entire slice with the read data.
    Read(&'a mut [u8]),

    /// Data to be written to a device on the bus.
    Write(&'a [u8]),
}
318
/// Parameters describing an MMIO or PIO from the guest.
#[derive(Debug)]
pub struct IoParams<'a> {
    /// Address being accessed (PIO port number or MMIO guest physical address).
    pub address: u64,
    /// Whether the access is a read or a write, along with the backing buffer.
    pub operation: IoOperation<'a>,
}
325
/// Architecture-agnostic wrapper for any hypercall ABI between CrosVM and the guest.
#[derive(Debug)]
pub struct HypercallAbi {
    // Unique identifier of the hypercall, used for routing.
    hypercall_id: usize,
    // Guest-provided, architecture-specific arguments.
    args: Vec<usize>,
    // Results to report back to the guest; initialized to a default (error) value by `new`.
    res: Vec<usize>,
}
333
334impl HypercallAbi {
335 /// Creates a new `HypercallAbi` instance, with the default error result.
336 pub fn new(hypercall_id: usize, args: &[usize], default_res: &[usize]) -> Self {
337 Self {
338 hypercall_id,
339 args: args.to_owned(),
340 res: default_res.to_owned(),
341 }
342 }
343
344 /// Returns the hypercall unique identifier, for routing.
345 pub fn hypercall_id(&self) -> usize {
346 self.hypercall_id
347 }
348
349 /// Returns the n-th guest-provided architecture-specific arguments.
350 pub fn get_argument(&self, n: usize) -> Option<&usize> {
351 self.args.get(n)
352 }
353
354 /// Returns the architecture-specific results for the guest, if set.
355 pub fn get_results(&self) -> &[usize] {
356 self.res.as_slice()
357 }
358
359 /// Sets the architecture-specific results for the guest.
360 pub fn set_results(&mut self, res: &[usize]) {
361 self.res = res.to_owned()
362 }
363}
364
/// Handle to a virtual CPU that may be used to request a VM exit from within a signal handler.
#[cfg(any(target_os = "android", target_os = "linux"))]
pub struct VcpuSignalHandle {
    // Boxed backend-specific implementation of the signal-safe exit request.
    inner: Box<dyn VcpuSignalHandleInner>,
}
370
#[cfg(any(target_os = "android", target_os = "linux"))]
impl VcpuSignalHandle {
    /// Request an immediate exit for this VCPU.
    ///
    /// This function is safe to call from a signal handler.
    pub fn signal_immediate_exit(&self) {
        // Delegate to the backend's async-signal-safe implementation.
        self.inner.signal_immediate_exit()
    }
}
380
/// Signal-safe mechanism for requesting an immediate VCPU exit.
///
/// Each hypervisor backend must implement this for its VCPU type.
/// Crate-internal: consumers use the public `VcpuSignalHandle` wrapper instead.
#[cfg(any(target_os = "android", target_os = "linux"))]
pub(crate) trait VcpuSignalHandleInner {
    /// Signal the associated VCPU to exit if it is currently running.
    ///
    /// # Safety
    ///
    /// The implementation of this function must be async signal safe.
    /// <https://man7.org/linux/man-pages/man7/signal-safety.7.html>
    fn signal_immediate_exit(&self);
}
394
/// A virtual CPU holding a virtualized hardware thread's state, such as registers and interrupt
/// state, which may be used to execute virtual machines.
pub trait Vcpu: std::any::Any + Send + Sync {
    /// Runs the VCPU until it exits, returning the reason for the exit.
    fn run(&self) -> Result<VcpuExit>;

    /// Returns the vcpu id.
    fn id(&self) -> usize;

    /// Sets the bit that requests an immediate exit.
    fn set_immediate_exit(&self, exit: bool);

    /// Returns a handle that can be used to cause this VCPU to exit from `run()` from a signal
    /// handler.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn signal_handle(&self) -> VcpuSignalHandle;

    /// Handles an incoming MMIO request from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::Mmio`, and in the same
    /// thread as run().
    ///
    /// Once called, it will determine whether a MMIO read or MMIO write was the reason for the MMIO
    /// exit, call `handle_fn` with the respective IoParams to perform the MMIO read or write, and
    /// set the return data in the vcpu so that the vcpu can resume running.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()>;

    /// Handles an incoming PIO from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::Io`, and in the same
    /// thread as run().
    ///
    /// Once called, it will determine whether an input or output was the reason for the Io exit,
    /// call `handle_fn` with the respective IoParams to perform the input/output operation, and set
    /// the return data in the vcpu so that the vcpu can resume running.
    fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams)) -> Result<()>;

    /// Handles an incoming hypercall from the guest.
    ///
    /// The default implementation returns a "not implemented" error; only backends with hypercall
    /// support override it.
    fn handle_hypercall(
        &self,
        _handle_fn: &mut dyn FnMut(&mut HypercallAbi) -> anyhow::Result<()>,
    ) -> anyhow::Result<()> {
        anyhow::bail!(
            "handle_hypercall not implemented for {}",
            std::any::type_name::<Self>(),
        )
    }

    /// Signals to the hypervisor that this Vcpu is being paused by userspace.
    fn on_suspend(&self) -> Result<()>;

    /// Enables a hypervisor-specific extension on this Vcpu. `cap` is a constant defined by the
    /// hypervisor API (e.g., kvm.h). `args` are the arguments for enabling the feature, if any.
    ///
    /// # Safety
    /// This function is marked as unsafe because `args` may be interpreted as pointers for some
    /// capabilities. The caller must ensure that any pointers passed in the `args` array are
    /// allocated as the kernel expects, and that mutable pointers are owned.
    unsafe fn enable_raw_capability(&self, cap: u32, args: &[u64; 4]) -> Result<()>;
}
455
/// An address either in programmable I/O space or in memory mapped I/O space.
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq, std::hash::Hash)]
pub enum IoEventAddress {
    /// A port number in programmable I/O space.
    Pio(u64),
    /// A guest physical address in memory-mapped I/O space.
    Mmio(u64),
}
462
463/// Used in `Vm::register_ioevent` to indicate a size and optionally value to match.
464#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
465pub enum Datamatch {
466 AnyLength,
467 U8(Option<u8>),
468 U16(Option<u16>),
469 U32(Option<u32>),
470 U64(Option<u64>),
471}
472
/// Broad classification of a vcpu shutdown cause; see `VcpuShutdownError`.
#[derive(Copy, Clone, Debug)]
pub enum VcpuShutdownErrorKind {
    /// The vcpu shut down due to a double fault.
    DoubleFault,
    /// The vcpu shut down due to a triple fault.
    TripleFault,
    /// Any other (hypervisor-specific) cause; consult the raw error code.
    Other,
}
479
/// A Vcpu shutdown may signify an error, such as a double or triple fault,
/// or hypervisor specific reasons. This error covers all such cases.
#[derive(Copy, Clone, Debug)]
pub struct VcpuShutdownError {
    // Broad classification of the shutdown cause.
    kind: VcpuShutdownErrorKind,
    // Hypervisor-specific raw error code accompanying the shutdown.
    raw_error_code: u64,
}
487
488impl VcpuShutdownError {
489 pub fn new(kind: VcpuShutdownErrorKind, raw_error_code: u64) -> VcpuShutdownError {
490 Self {
491 kind,
492 raw_error_code,
493 }
494 }
495 pub fn kind(&self) -> VcpuShutdownErrorKind {
496 self.kind
497 }
498 pub fn get_raw_error_code(&self) -> u64 {
499 self.raw_error_code
500 }
501}
502
503// Note that when adding entries to the VcpuExit enum you may want to add corresponding entries in
504// crosvm::stats::exit_to_index and crosvm::stats::exit_index_to_str if you don't want the new
505// exit type to be categorized as "Unknown".
506
/// A reason why a VCPU exited. One of these returns every time `Vcpu::run` is called.
#[derive(Debug, Clone, Copy)]
pub enum VcpuExit {
    /// An io instruction needs to be emulated.
    /// vcpu handle_io should be called to handle the io operation
    Io,
    /// A mmio instruction needs to be emulated.
    /// vcpu handle_mmio should be called to handle the mmio operation
    Mmio,
    /// The guest acknowledged (EOI) an ioapic-routed interrupt with the given vector.
    IoapicEoi {
        vector: u8,
    },
    /// The vcpu raised an exception.
    Exception,
    /// The guest issued a hypercall; see `Vcpu::handle_hypercall`.
    Hypercall,
    /// A debug event occurred on the vcpu.
    Debug,
    /// The guest executed a halt instruction.
    Hlt,
    /// The vcpu can now accept an interrupt injection (the interrupt window is open).
    IrqWindowOpen,
    /// The vcpu shut down; `Err` carries the cause (e.g. double/triple fault).
    Shutdown(std::result::Result<(), VcpuShutdownError>),
    /// Hardware VM entry failed, with the hardware-reported reason code.
    FailEntry {
        hardware_entry_failure_reason: u64,
    },
    /// The vcpu run returned due to an interruption (e.g. a pending host signal).
    Intr,
    /// The guest set the task priority register (TPR).
    SetTpr,
    /// The guest accessed the task priority register (TPR).
    TprAccess,
    /// The hypervisor reported an internal error.
    InternalError,
    /// The guest requested a system shutdown.
    SystemEventShutdown,
    /// The guest requested a system reset.
    SystemEventReset,
    /// The guest reported a crash.
    SystemEventCrash,
    /// An invalid vcpu register was set while running.
    InvalidVpRegister,
    /// incorrect setup for vcpu requiring an unsupported feature
    UnsupportedFeature,
    /// vcpu run was user cancelled
    Canceled,
    /// an unrecoverable exception was encountered (different from Exception)
    UnrecoverableException,
    /// vcpu stopped due to an msr access.
    MsrAccess,
    /// vcpu stopped due to a cpuid request.
    #[cfg(target_arch = "x86_64")]
    Cpuid {
        entry: CpuIdEntry,
    },
    /// vcpu stopped due to calling rdtsc
    RdTsc,
    /// vcpu stopped for an apic smi trap
    ApicSmiTrap,
    /// vcpu stopped due to an apic trap
    ApicInitSipiTrap,
    /// vcpu stopped due to bus lock
    BusLock,
    /// Riscv supervisor call.
    Sbi {
        extension_id: u64,
        function_id: u64,
        args: [u64; 6],
    },
    /// Emulate CSR access from guest.
    RiscvCsr {
        csr_num: u64,
        new_value: u64,
        write_mask: u64,
        ret_value: u64,
    },
}
572
/// A device type to create with `Vm::create_device`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DeviceKind {
    /// VFIO device for direct access to devices from userspace
    Vfio,
    /// ARM virtual general interrupt controller v2
    #[cfg(target_arch = "aarch64")]
    ArmVgicV2,
    /// ARM virtual general interrupt controller v3
    #[cfg(target_arch = "aarch64")]
    ArmVgicV3,
    /// ARM virtual interrupt translation service
    #[cfg(target_arch = "aarch64")]
    ArmVgicIts,
    /// RiscV AIA in-kernel emulation
    #[cfg(target_arch = "riscv64")]
    RiscvAia,
}
591
/// The source chip of an `IrqSource`
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum IrqSourceChip {
    /// Primary (master) legacy PIC (x86).
    PicPrimary,
    /// Secondary (slave) legacy PIC (x86).
    PicSecondary,
    /// I/O APIC (x86).
    Ioapic,
    /// ARM Generic Interrupt Controller.
    Gic,
    /// RISC-V Advanced Interrupt Architecture controller.
    Aia,
}
602
/// A source of IRQs in an `IrqRoute`.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum IrqSource {
    /// An interrupt delivered via an emulated interrupt controller pin.
    Irqchip {
        /// Which emulated interrupt controller the pin belongs to.
        chip: IrqSourceChip,
        /// Pin number on that chip.
        pin: u32,
    },
    /// A message-signaled interrupt.
    Msi {
        /// MSI doorbell address the device writes to.
        address: u64,
        /// MSI data payload.
        data: u32,
        /// Originating PCI device (aarch64 only — presumably needed for ITS routing; verify
        /// against the aarch64 backend usage).
        #[cfg(target_arch = "aarch64")]
        pci_address: resources::PciAddress,
    },
}
618
/// A single route for an IRQ.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct IrqRoute {
    /// Global system interrupt number being routed.
    pub gsi: u32,
    /// Where the interrupt originates.
    pub source: IrqSource,
}
626
/// The state of the paravirtual clock.
///
/// Retrieved and restored via `Vm::get_pvclock` / `Vm::set_pvclock`.
#[derive(Debug, Default, Copy, Clone, Serialize, Deserialize)]
pub struct ClockState {
    /// Current pv clock timestamp, as seen by the guest
    pub clock: u64,
}
633
/// The MPState represents the state of a processor.
// repr(C) for a stable, C-compatible layout.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum MPState {
    /// the vcpu is currently running (x86/x86_64,arm/arm64)
    Runnable,
    /// the vcpu is an application processor (AP) which has not yet received an INIT signal
    /// (x86/x86_64)
    Uninitialized,
    /// the vcpu has received an INIT signal, and is now ready for a SIPI (x86/x86_64)
    InitReceived,
    /// the vcpu has executed a HLT instruction and is waiting for an interrupt (x86/x86_64)
    Halted,
    /// the vcpu has just received a SIPI (vector accessible via KVM_GET_VCPU_EVENTS) (x86/x86_64)
    SipiReceived,
    /// the vcpu is stopped (arm/arm64)
    Stopped,
}
652
/// Whether the VM should be run in protected mode or not.
///
/// See `ProtectionType::isolates_memory` and `ProtectionType::runs_firmware` for the derived
/// properties of each mode.
#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum ProtectionType {
    /// The VM should be run in the unprotected mode, where the host has access to its memory.
    Unprotected,
    /// The VM should be run in protected mode, so the host cannot access its memory directly. It
    /// should be booted via the protected VM firmware, so that it can access its secrets.
    Protected,
    /// The VM should be run in protected mode, so the host cannot access its memory directly. It
    /// should be booted via a custom VM firmware, useful for debugging and testing.
    ProtectedWithCustomFirmware,
    /// The VM should be run in protected mode, but booted directly without pVM firmware. The host
    /// will still be unable to access the VM memory, but it won't be given any secrets.
    ProtectedWithoutFirmware,
    /// The VM should be run in unprotected mode, but with the same memory layout as protected
    /// mode, protected VM firmware loaded, and simulating protected mode as much as possible.
    /// This is useful for debugging the protected VM firmware and other protected mode issues.
    UnprotectedWithFirmware,
}
672
673impl ProtectionType {
674 /// Returns whether the hypervisor will prevent us from accessing the VM's memory.
675 pub fn isolates_memory(&self) -> bool {
676 matches!(
677 self,
678 Self::Protected | Self::ProtectedWithCustomFirmware | Self::ProtectedWithoutFirmware
679 )
680 }
681
682 /// Returns whether the VMM needs to load the pVM firmware.
683 pub fn needs_firmware_loaded(&self) -> bool {
684 matches!(
685 self,
686 Self::UnprotectedWithFirmware | Self::ProtectedWithCustomFirmware
687 )
688 }
689
690 /// Returns whether the VM runs a pVM firmware.
691 pub fn runs_firmware(&self) -> bool {
692 self.needs_firmware_loaded() || matches!(self, Self::Protected)
693 }
694}
695
/// Hypervisor-level configuration options for creating a VM.
#[derive(Clone, Copy)]
pub struct Config {
    #[cfg(target_arch = "aarch64")]
    /// enable the Memory Tagging Extension in the guest
    pub mte: bool,
    /// Protection mode the VM runs under; see `ProtectionType`.
    pub protection_type: ProtectionType,
    #[cfg(all(target_os = "android", target_arch = "aarch64"))]
    /// Enable FF-A support — NOTE(review): assumption from the field name; verify against usage.
    pub ffa: bool,
    /// Never register guest memory regions as read-only — NOTE(review): assumption from the field
    /// name; confirm against backend `add_memory_region` implementations.
    pub force_disable_readonly_mem: bool,
}
706
impl Default for Config {
    /// Returns a configuration for an unprotected VM with all optional features disabled.
    fn default() -> Config {
        Config {
            #[cfg(target_arch = "aarch64")]
            mte: false,
            protection_type: ProtectionType::Unprotected,
            #[cfg(all(target_os = "android", target_arch = "aarch64"))]
            ffa: false,
            force_disable_readonly_mem: false,
        }
    }
}