hypervisor/lib.rs

// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! A crate for abstracting the underlying kernel hypervisor used in crosvm.

#[cfg(target_arch = "aarch64")]
pub mod aarch64;
pub mod caps;
#[cfg(all(unix, target_arch = "aarch64", feature = "geniezone"))]
pub mod geniezone;
#[cfg(all(unix, target_arch = "aarch64", feature = "gunyah"))]
pub mod gunyah;
#[cfg(all(unix, target_arch = "aarch64", feature = "halla"))]
pub mod halla;
#[cfg(all(windows, feature = "haxm"))]
pub mod haxm;
#[cfg(any(target_os = "android", target_os = "linux"))]
pub mod kvm;
#[cfg(target_arch = "riscv64")]
pub mod riscv64;
#[cfg(all(windows, feature = "whpx"))]
pub mod whpx;
#[cfg(target_arch = "x86_64")]
pub mod x86_64;

use base::AsRawDescriptor;
use base::Event;
use base::MappedRegion;
use base::Protection;
use base::Result;
use base::SafeDescriptor;
use serde::Deserialize;
use serde::Serialize;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

#[cfg(target_arch = "aarch64")]
pub use crate::aarch64::*;
pub use crate::caps::*;
#[cfg(target_arch = "riscv64")]
pub use crate::riscv64::*;
#[cfg(target_arch = "x86_64")]
pub use crate::x86_64::*;

/// An index in the list of guest-mapped memory regions.
pub type MemSlot = u32;

/// A range of GPA space, starting at `guest_address` and spanning `size` bytes.
pub struct MemRegion {
    pub guest_address: GuestAddress,
    pub size: u64,
}

/// Signal to the hypervisor on kernels that support the KVM_CAP_USER_CONFIGURE_NONCOHERENT_DMA (or
/// equivalent) that during user memory region (memslot) configuration, a guest page's memtype
/// should be considered in SLAT effective memtype determination rather than implicitly respecting
/// only the host page's memtype.
///
/// This explicit control is needed for Virtio devices (e.g. gpu) that configure memslots for host
/// WB page mappings with guest WC page mappings. See b/316337317, b/360295883 for more detail.
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum MemCacheType {
    /// Don't provide any explicit instruction to the hypervisor on how it should determine a
    /// memslot's effective memtype.
    ///
    /// On KVM-VMX (Intel), this means that the memslot is flagged with VMX_EPT_IPAT_BIT such that
    /// only the host memtype is respected.
    CacheCoherent,
    /// Explicitly instruct the hypervisor to respect the guest page's memtype when determining the
    /// memslot's effective memtype.
    ///
    /// On KVM-VMX (Intel), this means the memslot is NOT flagged with VMX_EPT_IPAT_BIT, and the
    /// effective memtype will generally decay to the weaker amongst the host/guest memtypes and
    /// the MTRR for the physical address.
    CacheNonCoherent,
}

/// This is intended for use with virtio-balloon, where a guest driver determines unused ranges and
/// requests they be freed. Use without the guest's knowledge is sure to break something.
pub enum BalloonEvent {
    /// Balloon event when the region is acquired from the guest. The guest cannot access this
    /// region any more. The guest memory can be reclaimed by the host OS. As per virtio-balloon
    /// spec, the given address and size are intended to be page-aligned.
    Inflate(MemRegion),
    /// Balloon event when the region is returned to the guest. VMM should reallocate memory and
    /// register it with the hypervisor for accesses by the guest.
    Deflate(MemRegion),
    /// Balloon event when the requested memory size is achieved. This can be achieved through
    /// either inflation or deflation. The `u64` will be the current size of the balloon in bytes.
    BalloonTargetReached(u64),
}
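
// Illustrative sketch (an assumption added for documentation, not code used by this crate):
// building a balloon inflate event for a page-aligned range. The address and size are
// arbitrary placeholders; a `Vm` implementation consumes the event via
// `Vm::handle_balloon_event`.
//
//     let inflate = BalloonEvent::Inflate(MemRegion {
//         guest_address: GuestAddress(0x0010_0000),
//         size: 0x0010_0000,
//     });
//     vm.handle_balloon_event(inflate)?;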

/// Supported hypervisors.
///
/// When adding a new one, also update the HypervisorFfi in crosvm_control/src/lib.rs
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum HypervisorKind {
    Geniezone,
    Gunyah,
    Halla,
    Kvm,
    Haxm,
    Whpx,
}

/// A trait for checking hypervisor capabilities.
pub trait Hypervisor: Send {
    /// Makes a shallow clone of this `Hypervisor`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Checks if a particular `HypervisorCap` is available.
    fn check_capability(&self, cap: HypervisorCap) -> bool;
}
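
// Illustrative sketch (assumption): probing for a capability before relying on it.
// `require_cap` is a hypothetical helper; any backend implementing `Hypervisor` (such as
// the ones provided by the modules above) could be passed in. The error conversion mirrors
// the pattern used by `Vm::enable_capability`'s default implementation below.
//
//     fn require_cap(hypervisor: &dyn Hypervisor, cap: HypervisorCap) -> Result<()> {
//         if hypervisor.check_capability(cap) {
//             Ok(())
//         } else {
//             Err(std::io::Error::from(std::io::ErrorKind::Unsupported).into())
//         }
//     }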

/// A wrapper for using a VM and getting/setting its state.
pub trait Vm: Send {
    /// Makes a shallow clone of this `Vm`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Makes a shallow clone of the fd of this `Vm`.
    fn try_clone_descriptor(&self) -> Result<SafeDescriptor>;

    /// Returns the hypervisor managing this `Vm`.
    fn hypervisor_kind(&self) -> HypervisorKind;

    /// Checks if a particular `VmCap` is available.
    ///
    /// This is distinct from the `Hypervisor` version of this method because some extensions depend
    /// on the particular `Vm` instance. This method is encouraged because it more accurately
    /// reflects the usable capabilities.
    fn check_capability(&self, c: VmCap) -> bool;

    /// Enables the given VM capability.
    fn enable_capability(&self, _capability: VmCap, _flags: u32) -> Result<bool> {
        Err(std::io::Error::from(std::io::ErrorKind::Unsupported).into())
    }

    /// Get the guest physical address size in bits.
    fn get_guest_phys_addr_bits(&self) -> u8;

    /// Gets the guest-mapped memory for the Vm.
    fn get_memory(&self) -> &GuestMemory;

    /// Inserts the given `MappedRegion` into the VM's address space at `guest_addr`.
    ///
    /// The slot that was assigned the memory mapping is returned on success.  The slot can be given
    /// to `Vm::remove_memory_region` to remove the memory from the VM's address space and take back
    /// ownership of `mem_region`.
    ///
    /// Note that memory inserted into the VM's address space must not overlap with any other memory
    /// slot's region.
    ///
    /// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to
    /// write will trigger an MMIO VM exit, leaving the memory untouched.
    ///
    /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
    /// by the guest with `get_dirty_log`.
    ///
    /// `cache` can be used to set the guest memory cache attribute if supported. The default is
    /// cache-coherent memory. Noncoherent memory means the memory might not be coherent from all
    /// access points, e.g. when the host GPU does not keep the memory coherent with CPU access.
    /// Setting this attribute allows the hypervisor to adjust guest memory controls to ensure
    /// synchronized guest access in the noncoherent DMA case.
    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem_region: Box<dyn MappedRegion>,
        read_only: bool,
        log_dirty_pages: bool,
        cache: MemCacheType,
    ) -> Result<MemSlot>;

    /// Does a synchronous msync of the memory mapped at `slot`, syncing `size` bytes starting at
    /// `offset` from the start of the region.  `offset` must be page aligned.
    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()>;

    /// Gives a MADV_PAGEOUT advice to the memory region mapped at `slot`, with the address range
    /// starting at `offset` from the start of the region, and with size `size`. `offset`
    /// must be page aligned.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn madvise_pageout_memory_region(
        &mut self,
        slot: MemSlot,
        offset: usize,
        size: usize,
    ) -> Result<()>;

    /// Gives a MADV_REMOVE advice to the memory region mapped at `slot`, with the address range
    /// starting at `offset` from the start of the region, and with size `size`. `offset`
    /// must be page aligned.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn madvise_remove_memory_region(
        &mut self,
        slot: MemSlot,
        offset: usize,
        size: usize,
    ) -> Result<()>;

    /// Removes and drops the `UserMemoryRegion` that was previously added at the given slot.
    fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>>;

    /// Creates an emulated device.
    fn create_device(&self, kind: DeviceKind) -> Result<SafeDescriptor>;

    /// Gets the bitmap of dirty pages since the last call to `get_dirty_log` for the memory at
    /// `slot`.  Only works on VMs that support `VmCap::DirtyLog`.
    ///
    /// The size of `dirty_log` must be at least as many bits as there are pages in the memory
    /// region `slot` represents. For example, if the size of `slot` is 16 pages, `dirty_log` must
    /// be 2 bytes or greater.
    fn get_dirty_log(&self, slot: MemSlot, dirty_log: &mut [u8]) -> Result<()>;

    /// Registers an event to be signaled whenever a certain address is written to.
    ///
    /// The `datamatch` parameter can be used to limit signaling `evt` to only the cases where the
    /// value being written is equal to `datamatch`. Note that the size of `datamatch` is important
    /// and must match the expected size of the guest's write.
    ///
    /// In all cases where `evt` is signaled, the ordinary vmexit to userspace that would be
    /// triggered is prevented.
    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()>;

    /// Unregisters an event previously registered with `register_ioevent`.
    ///
    /// The `evt`, `addr`, and `datamatch` set must be the same as the ones passed into
    /// `register_ioevent`.
    fn unregister_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()>;

    /// Trigger any matching registered io events based on an MMIO or PIO write at `addr`. The
    /// `data` slice represents the contents and length of the write, which is used to compare with
    /// the registered io events' Datamatch values. If the hypervisor does in-kernel IO event
    /// delivery, this is a no-op.
    fn handle_io_events(&self, addr: IoEventAddress, data: &[u8]) -> Result<()>;

    /// Retrieves the current timestamp of the paravirtual clock as seen by the current guest.
    /// Only works on VMs that support `VmCap::PvClock`.
    fn get_pvclock(&self) -> Result<ClockState>;

    /// Sets the current timestamp of the paravirtual clock as seen by the current guest.
    /// Only works on VMs that support `VmCap::PvClock`.
    fn set_pvclock(&self, state: &ClockState) -> Result<()>;

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the arena with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()>;

    /// Removes the `size`-byte mapping starting at `offset` from the memory region at `slot`.
    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()>;

    /// Events from virtio-balloon that affect the state for guest memory and host memory.
    fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()>;
}
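
// Illustrative sketch (assumption): inserting a host mapping into the guest address space,
// enabling dirty-page logging only if the VM reports support for it. `region` is any
// `MappedRegion` the caller already owns (for example a `base::MemoryMapping`); the helper
// name and guest address are placeholders.
//
//     fn map_at_1g(vm: &mut impl Vm, region: Box<dyn MappedRegion>) -> Result<MemSlot> {
//         let log_dirty_pages = vm.check_capability(VmCap::DirtyLog);
//         vm.add_memory_region(
//             GuestAddress(0x4000_0000),
//             region,
//             false, // read_only
//             log_dirty_pages,
//             MemCacheType::CacheCoherent,
//         )
//     }
//
// The returned slot can later be passed to `Vm::remove_memory_region` to take the mapping
// back out of the guest and reclaim ownership of the region.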

/// Operation for Io and Mmio
#[derive(Debug)]
pub enum IoOperation<'a> {
    /// Data to be read from a device on the bus.
    ///
    /// The `handle_fn` should fill the entire slice with the read data.
    Read(&'a mut [u8]),

    /// Data to be written to a device on the bus.
    Write(&'a [u8]),
}

/// Parameters describing an MMIO or PIO from the guest.
#[derive(Debug)]
pub struct IoParams<'a> {
    pub address: u64,
    pub operation: IoOperation<'a>,
}
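
// Illustrative sketch (assumption): the shape of a `handle_fn` that dispatches on whether
// the exit was a read or a write. The device lookup at `address` is elided.
//
//     let mut handle_fn = |IoParams { address, operation }: IoParams| -> Result<()> {
//         match operation {
//             IoOperation::Read(data) => {
//                 // Fill `data` with the bytes the device at `address` returns.
//                 data.fill(0);
//             }
//             IoOperation::Write(data) => {
//                 // Forward the written bytes to the device at `address`.
//                 let _ = (address, data);
//             }
//         }
//         Ok(())
//     };
//
// `&mut handle_fn` can then be passed to `Vcpu::handle_mmio` (see below).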

/// Handle to a virtual CPU that may be used to request a VM exit from within a signal handler.
#[cfg(any(target_os = "android", target_os = "linux"))]
pub struct VcpuSignalHandle {
    inner: Box<dyn VcpuSignalHandleInner>,
}

#[cfg(any(target_os = "android", target_os = "linux"))]
impl VcpuSignalHandle {
    /// Request an immediate exit for this VCPU.
    ///
    /// This function is safe to call from a signal handler.
    pub fn signal_immediate_exit(&self) {
        self.inner.signal_immediate_exit()
    }
}

/// Signal-safe mechanism for requesting an immediate VCPU exit.
///
/// Each hypervisor backend must implement this for its VCPU type.
#[cfg(any(target_os = "android", target_os = "linux"))]
pub(crate) trait VcpuSignalHandleInner {
    /// Signal the associated VCPU to exit if it is currently running.
    ///
    /// # Safety
    ///
    /// The implementation of this function must be async signal safe.
    /// <https://man7.org/linux/man-pages/man7/signal-safety.7.html>
    fn signal_immediate_exit(&self);
}

/// A virtual CPU holding a virtualized hardware thread's state, such as registers and interrupt
/// state, which may be used to execute virtual machines.
pub trait Vcpu: downcast_rs::DowncastSync {
    /// Makes a shallow clone of this `Vcpu`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Casts this architecture-specific trait object to the base trait object `Vcpu`.
    fn as_vcpu(&self) -> &dyn Vcpu;

    /// Runs the VCPU until it exits, returning the reason for the exit.
    fn run(&mut self) -> Result<VcpuExit>;

    /// Returns the vcpu id.
    fn id(&self) -> usize;

    /// Sets the bit that requests an immediate exit.
    fn set_immediate_exit(&self, exit: bool);

    /// Returns a handle that can be used to cause this VCPU to exit from `run()` from a signal
    /// handler.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn signal_handle(&self) -> VcpuSignalHandle;

    /// Handles an incoming MMIO request from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::Mmio`, and on the same
    /// thread that called `run()`.
    ///
    /// Once called, it determines whether an MMIO read or MMIO write was the reason for the MMIO
    /// exit, calls `handle_fn` with the respective `IoParams` to perform the read or write, and
    /// sets the return data in the vcpu so that the vcpu can resume running.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()>;

    /// Handles an incoming PIO from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::Io`, and on the same
    /// thread that called `run()`.
    ///
    /// Once called, it determines whether an input or an output was the reason for the I/O exit,
    /// calls `handle_fn` with the respective `IoParams` to perform the input/output operation, and
    /// sets the return data in the vcpu so that the vcpu can resume running.
    fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams)) -> Result<()>;

    /// Signals to the hypervisor that this Vcpu is being paused by userspace.
    fn on_suspend(&self) -> Result<()>;

    /// Enables a hypervisor-specific extension on this Vcpu.  `cap` is a constant defined by the
    /// hypervisor API (e.g., kvm.h).  `args` are the arguments for enabling the feature, if any.
    ///
    /// # Safety
    /// This function is marked as unsafe because `args` may be interpreted as pointers for some
    /// capabilities. The caller must ensure that any pointers passed in the `args` array are
    /// allocated as the kernel expects, and that mutable pointers are owned.
    unsafe fn enable_raw_capability(&self, cap: u32, args: &[u64; 4]) -> Result<()>;
}

downcast_rs::impl_downcast!(sync Vcpu);
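
// Illustrative sketch (assumption): a minimal dispatch loop over `Vcpu::run`, handling only
// the MMIO, I/O, and shutdown exits. Real callers handle many more `VcpuExit` variants and
// pass real device handlers instead of the no-op closures shown here.
//
//     fn run_vcpu(vcpu: &mut dyn Vcpu) -> Result<()> {
//         loop {
//             match vcpu.run()? {
//                 VcpuExit::Mmio => vcpu.handle_mmio(&mut |_params| Ok(()))?,
//                 VcpuExit::Io => vcpu.handle_io(&mut |_params| ())?,
//                 VcpuExit::Shutdown(_) | VcpuExit::SystemEventShutdown => return Ok(()),
//                 _ => {}
//             }
//         }
//     }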

/// An address either in programmable I/O space or in memory mapped I/O space.
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq, std::hash::Hash)]
pub enum IoEventAddress {
    Pio(u64),
    Mmio(u64),
}

/// Used in `Vm::register_ioevent` to indicate a size and optionally a value to match.
#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Datamatch {
    AnyLength,
    U8(Option<u8>),
    U16(Option<u16>),
    U32(Option<u32>),
    U64(Option<u64>),
}
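
// Illustrative sketch (assumption): wiring an `Event` to a 4-byte MMIO write of a specific
// value, as a device's queue-notify register might require. The helper name, address, and
// value are placeholders.
//
//     fn register_notify(vm: &mut impl Vm, evt: &Event) -> Result<()> {
//         vm.register_ioevent(
//             evt,
//             IoEventAddress::Mmio(0xd000_0000),
//             Datamatch::U32(Some(0)),
//         )
//     }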

#[derive(Copy, Clone, Debug)]
pub enum VcpuShutdownErrorKind {
    DoubleFault,
    TripleFault,
    Other,
}

/// A Vcpu shutdown may signify an error, such as a double or triple fault, or other
/// hypervisor-specific reasons. This error covers all such cases.
#[derive(Copy, Clone, Debug)]
pub struct VcpuShutdownError {
    kind: VcpuShutdownErrorKind,
    raw_error_code: u64,
}

impl VcpuShutdownError {
    pub fn new(kind: VcpuShutdownErrorKind, raw_error_code: u64) -> VcpuShutdownError {
        Self {
            kind,
            raw_error_code,
        }
    }
    pub fn kind(&self) -> VcpuShutdownErrorKind {
        self.kind
    }
    pub fn get_raw_error_code(&self) -> u64 {
        self.raw_error_code
    }
}

// Note that when adding entries to the VcpuExit enum you may want to add corresponding entries in
// crosvm::stats::exit_to_index and crosvm::stats::exit_index_to_str if you don't want the new
// exit type to be categorized as "Unknown".

/// A reason why a VCPU exited. One of these is returned every time `Vcpu::run` is called.
#[derive(Debug, Clone, Copy)]
pub enum VcpuExit {
    /// An I/O instruction needs to be emulated.
    /// `Vcpu::handle_io` should be called to handle the I/O operation.
    Io,
    /// An MMIO instruction needs to be emulated.
    /// `Vcpu::handle_mmio` should be called to handle the MMIO operation.
    Mmio,
    IoapicEoi {
        vector: u8,
    },
    Exception,
    Hypercall,
    Debug,
    Hlt,
    IrqWindowOpen,
    Shutdown(std::result::Result<(), VcpuShutdownError>),
    FailEntry {
        hardware_entry_failure_reason: u64,
    },
    Intr,
    SetTpr,
    TprAccess,
    InternalError,
    SystemEventShutdown,
    SystemEventReset,
    SystemEventCrash,
    /// An invalid vcpu register was set while running.
    InvalidVpRegister,
    /// incorrect setup for vcpu requiring an unsupported feature
    UnsupportedFeature,
    /// vcpu run was user cancelled
    Canceled,
    /// an unrecoverable exception was encountered (different from Exception)
    UnrecoverableException,
    /// vcpu stopped due to an msr access.
    MsrAccess,
    /// vcpu stopped due to a cpuid request.
    #[cfg(target_arch = "x86_64")]
    Cpuid {
        entry: CpuIdEntry,
    },
    /// vcpu stopped due to calling rdtsc
    RdTsc,
    /// vcpu stopped for an apic smi trap
    ApicSmiTrap,
    /// vcpu stopped due to an apic trap
    ApicInitSipiTrap,
    /// vcpu stopped due to bus lock
    BusLock,
    /// Riscv supervisor call.
    Sbi {
        extension_id: u64,
        function_id: u64,
        args: [u64; 6],
    },
    /// Emulate CSR access from guest.
    RiscvCsr {
        csr_num: u64,
        new_value: u64,
        write_mask: u64,
        ret_value: u64,
    },
}

/// A device type to create with `Vm.create_device`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DeviceKind {
    /// VFIO device for direct access to devices from userspace
    Vfio,
    /// ARM virtual general interrupt controller v2
    #[cfg(target_arch = "aarch64")]
    ArmVgicV2,
    /// ARM virtual general interrupt controller v3
    #[cfg(target_arch = "aarch64")]
    ArmVgicV3,
    /// ARM virtual interrupt translation service
    #[cfg(target_arch = "aarch64")]
    ArmVgicIts,
    /// RiscV AIA in-kernel emulation
    #[cfg(target_arch = "riscv64")]
    RiscvAia,
}

/// The source chip of an `IrqSource`
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum IrqSourceChip {
    PicPrimary,
    PicSecondary,
    Ioapic,
    Gic,
    Aia,
}

/// A source of IRQs in an `IrqRoute`.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum IrqSource {
    Irqchip {
        chip: IrqSourceChip,
        pin: u32,
    },
    Msi {
        address: u64,
        data: u32,
        #[cfg(target_arch = "aarch64")]
        pci_address: resources::PciAddress,
    },
}

/// A single route for an IRQ.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct IrqRoute {
    pub gsi: u32,
    pub source: IrqSource,
}
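
// Illustrative sketch (assumption): routing GSI 5 to pin 5 of the emulated IOAPIC. The GSI
// and pin values are placeholders; the `Msi` variant is built analogously from an
// address/data pair.
//
//     let route = IrqRoute {
//         gsi: 5,
//         source: IrqSource::Irqchip {
//             chip: IrqSourceChip::Ioapic,
//             pin: 5,
//         },
//     };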

/// The state of the paravirtual clock.
#[derive(Debug, Default, Copy, Clone, Serialize, Deserialize)]
pub struct ClockState {
    /// Current pv clock timestamp, as seen by the guest
    pub clock: u64,
}

/// The MPState represents the state of a processor.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum MPState {
    /// the vcpu is currently running (x86/x86_64, arm/arm64)
    Runnable,
    /// the vcpu is an application processor (AP) which has not yet received an INIT signal
    /// (x86/x86_64)
    Uninitialized,
    /// the vcpu has received an INIT signal, and is now ready for a SIPI (x86/x86_64)
    InitReceived,
    /// the vcpu has executed a HLT instruction and is waiting for an interrupt (x86/x86_64)
    Halted,
    /// the vcpu has just received a SIPI (vector accessible via KVM_GET_VCPU_EVENTS) (x86/x86_64)
    SipiReceived,
    /// the vcpu is stopped (arm/arm64)
    Stopped,
}

/// Whether the VM should be run in protected mode or not.
#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum ProtectionType {
    /// The VM should be run in the unprotected mode, where the host has access to its memory.
    Unprotected,
    /// The VM should be run in protected mode, so the host cannot access its memory directly. It
    /// should be booted via the protected VM firmware, so that it can access its secrets.
    Protected,
    /// The VM should be run in protected mode, so the host cannot access its memory directly. It
    /// should be booted via a custom VM firmware, useful for debugging and testing.
    ProtectedWithCustomFirmware,
    /// The VM should be run in protected mode, but booted directly without pVM firmware. The host
    /// will still be unable to access the VM memory, but it won't be given any secrets.
    ProtectedWithoutFirmware,
    /// The VM should be run in unprotected mode, but with the same memory layout as protected
    /// mode, protected VM firmware loaded, and simulating protected mode as much as possible.
    /// This is useful for debugging the protected VM firmware and other protected mode issues.
    UnprotectedWithFirmware,
}

impl ProtectionType {
    /// Returns whether the hypervisor will prevent us from accessing the VM's memory.
    pub fn isolates_memory(&self) -> bool {
        matches!(
            self,
            Self::Protected | Self::ProtectedWithCustomFirmware | Self::ProtectedWithoutFirmware
        )
    }

    /// Returns whether the VMM needs to load the pVM firmware.
    pub fn needs_firmware_loaded(&self) -> bool {
        matches!(
            self,
            Self::UnprotectedWithFirmware | Self::ProtectedWithCustomFirmware
        )
    }

    /// Returns whether the VM runs a pVM firmware.
    pub fn runs_firmware(&self) -> bool {
        self.needs_firmware_loaded() || matches!(self, Self::Protected)
    }
}
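
// Illustrative sketch (assumption): how the predicates above combine for one variant.
// `UnprotectedWithFirmware` loads and runs the pVM firmware but does not isolate memory.
//
//     let p = ProtectionType::UnprotectedWithFirmware;
//     assert!(!p.isolates_memory());
//     assert!(p.needs_firmware_loaded());
//     assert!(p.runs_firmware());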

#[derive(Clone, Copy)]
pub struct Config {
    #[cfg(target_arch = "aarch64")]
    /// enable the Memory Tagging Extension in the guest
    pub mte: bool,
    pub protection_type: ProtectionType,
    #[cfg(all(target_os = "android", target_arch = "aarch64"))]
    pub ffa: bool,
    pub force_disable_readonly_mem: bool,
}

impl Default for Config {
    fn default() -> Config {
        Config {
            #[cfg(target_arch = "aarch64")]
            mte: false,
            protection_type: ProtectionType::Unprotected,
            #[cfg(all(target_os = "android", target_arch = "aarch64"))]
            ffa: false,
            force_disable_readonly_mem: false,
        }
    }
}
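
// Illustrative sketch (assumption): callers typically start from the defaults and override
// individual fields with struct update syntax.
//
//     let config = Config {
//         protection_type: ProtectionType::Protected,
//         ..Default::default()
//     };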