arch/lib.rs

1// Copyright 2018 The ChromiumOS Authors
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5//! Virtual machine architecture support code.
6
7pub mod android;
8pub mod fdt;
9pub mod pstore;
10pub mod serial;
11
12pub mod sys;
13
14use std::collections::BTreeMap;
15use std::error::Error as StdError;
16use std::fs::File;
17use std::io;
18use std::ops::Deref;
19use std::path::PathBuf;
20use std::str::FromStr;
21use std::sync::mpsc;
22use std::sync::mpsc::SendError;
23use std::sync::Arc;
24
25use acpi_tables::sdt::SDT;
26use base::syslog;
27use base::AsRawDescriptors;
28use base::FileGetLen;
29use base::FileReadWriteAtVolatile;
30use base::RecvTube;
31use base::SendTube;
32use base::Tube;
33use devices::virtio::VirtioDevice;
34use devices::BarRange;
35use devices::Bus;
36use devices::BusDevice;
37use devices::BusDeviceObj;
38use devices::BusError;
39use devices::BusResumeDevice;
40use devices::FwCfgParameters;
41use devices::GpeScope;
42use devices::HotPlugBus;
43use devices::IrqChip;
44use devices::IrqEventSource;
45use devices::PciAddress;
46use devices::PciBus;
47use devices::PciDevice;
48use devices::PciDeviceError;
49use devices::PciInterruptPin;
50use devices::PciRoot;
51use devices::PciRootCommand;
52use devices::PreferredIrq;
53#[cfg(any(target_os = "android", target_os = "linux"))]
54use devices::ProxyDevice;
55use devices::SerialHardware;
56use devices::SerialParameters;
57pub use fdt::apply_device_tree_overlays;
58pub use fdt::DtbOverlay;
59#[cfg(feature = "gdb")]
60use gdbstub::arch::Arch;
61use hypervisor::MemCacheType;
62use hypervisor::Vm;
63#[cfg(windows)]
64use jail::FakeMinijailStub as Minijail;
65#[cfg(any(target_os = "android", target_os = "linux"))]
66use minijail::Minijail;
67use remain::sorted;
68use resources::SystemAllocator;
69use resources::SystemAllocatorConfig;
70use serde::de::Visitor;
71use serde::Deserialize;
72use serde::Serialize;
73use serde_keyvalue::FromKeyValues;
74pub use serial::add_serial_devices;
75pub use serial::get_serial_cmdline;
76pub use serial::set_default_serial_parameters;
77pub use serial::GetSerialCmdlineError;
78pub use serial::SERIAL_ADDR;
79use sync::Condvar;
80use sync::Mutex;
81#[cfg(any(target_os = "android", target_os = "linux"))]
82pub use sys::linux::PlatformBusResources;
83use thiserror::Error;
84use uuid::Uuid;
85use vm_control::BatControl;
86use vm_control::BatteryType;
87use vm_control::PmResource;
88use vm_memory::GuestAddress;
89use vm_memory::GuestMemory;
90use vm_memory::GuestMemoryError;
91use vm_memory::MemoryRegionInformation;
92use vm_memory::MemoryRegionOptions;
93
94cfg_if::cfg_if! {
95    if #[cfg(target_arch = "aarch64")] {
96        pub use devices::IrqChipAArch64 as IrqChipArch;
97        #[cfg(feature = "gdb")]
98        pub use gdbstub_arch::aarch64::AArch64 as GdbArch;
99        pub use hypervisor::CpuConfigAArch64 as CpuConfigArch;
100        pub use hypervisor::Hypervisor as HypervisorArch;
101        pub use hypervisor::VcpuAArch64 as VcpuArch;
102        pub use hypervisor::VcpuInitAArch64 as VcpuInitArch;
103        pub use hypervisor::VmAArch64 as VmArch;
104    } else if #[cfg(target_arch = "riscv64")] {
105        pub use devices::IrqChipRiscv64 as IrqChipArch;
106        #[cfg(feature = "gdb")]
107        pub use gdbstub_arch::riscv::Riscv64 as GdbArch;
108        pub use hypervisor::CpuConfigRiscv64 as CpuConfigArch;
109        pub use hypervisor::Hypervisor as HypervisorArch;
110        pub use hypervisor::VcpuInitRiscv64 as VcpuInitArch;
111        pub use hypervisor::VcpuRiscv64 as VcpuArch;
112        pub use hypervisor::VmRiscv64 as VmArch;
113    } else if #[cfg(target_arch = "x86_64")] {
114        pub use devices::IrqChipX86_64 as IrqChipArch;
115        #[cfg(feature = "gdb")]
116        pub use gdbstub_arch::x86::X86_64_SSE as GdbArch;
117        pub use hypervisor::CpuConfigX86_64 as CpuConfigArch;
118        pub use hypervisor::HypervisorX86_64 as HypervisorArch;
119        pub use hypervisor::VcpuInitX86_64 as VcpuInitArch;
120        pub use hypervisor::VcpuX86_64 as VcpuArch;
121        pub use hypervisor::VmX86_64 as VmArch;
122    }
123}
124
125pub enum VmImage {
126    Kernel(File),
127    Bios(File),
128}
129
130#[derive(Clone, Debug, Deserialize, Serialize, FromKeyValues, PartialEq, Eq)]
131#[serde(deny_unknown_fields, rename_all = "kebab-case")]
132pub struct Pstore {
133    pub path: PathBuf,
134    pub size: u32,
135}
136
137#[derive(Clone, Copy, Debug, Serialize, Deserialize, FromKeyValues)]
138#[serde(deny_unknown_fields, rename_all = "kebab-case")]
139pub enum FdtPosition {
140    /// At the start of RAM.
141    Start,
142    /// Near the end of RAM.
143    End,
144    /// After the payload, with some padding for alignment.
145    AfterPayload,
146}
147
148/// Set of CPU cores.
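///
/// Illustrative sketch (assumes this crate is available as `arch`; values are hypothetical):
///
/// ```
/// # use arch::CpuSet;
/// // A set containing host cores 0, 2 and 3.
/// let set = CpuSet::new([0, 2, 3]);
/// assert_eq!(*set, vec![0, 2, 3]);
/// ```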
149#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
150pub struct CpuSet(Vec<usize>);
151
152impl CpuSet {
153    pub fn new<I: IntoIterator<Item = usize>>(cpus: I) -> Self {
154        CpuSet(cpus.into_iter().collect())
155    }
156
157    pub fn iter(&self) -> std::slice::Iter<'_, usize> {
158        self.0.iter()
159    }
160}
161
162impl FromIterator<usize> for CpuSet {
163    fn from_iter<T>(iter: T) -> Self
164    where
165        T: IntoIterator<Item = usize>,
166    {
167        CpuSet::new(iter)
168    }
169}
170
171#[cfg(target_arch = "aarch64")]
172fn sve_auto_default() -> bool {
173    true
174}
175
176/// The SVE config for Vcpus.
177#[cfg(target_arch = "aarch64")]
178#[derive(Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
179#[serde(deny_unknown_fields, rename_all = "kebab-case")]
180pub struct SveConfig {
181    /// Detect whether SVE is available and enable it accordingly. `enable` is ignored if `auto` is true.
182    #[serde(default = "sve_auto_default")]
183    pub auto: bool,
184}
185
186#[cfg(target_arch = "aarch64")]
187impl Default for SveConfig {
188    fn default() -> Self {
189        SveConfig {
190            auto: sve_auto_default(),
191        }
192    }
193}
194
195/// FFA config.
196// For now this is limited to Android; it will be opened up to other aarch64-based pVMs once the
197// corresponding kernel APIs are upstreamed.
198#[cfg(all(target_os = "android", target_arch = "aarch64"))]
199#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize, FromKeyValues)]
200#[serde(deny_unknown_fields, rename_all = "kebab-case")]
201pub struct FfaConfig {
202    /// Just enable FFA, don't care about the negotiated version.
203    #[serde(default)]
204    pub auto: bool,
205}
206
207fn parse_cpu_range(s: &str, cpuset: &mut Vec<usize>) -> Result<(), String> {
208    fn parse_cpu(s: &str) -> Result<usize, String> {
209        s.parse()
210            .map_err(|_| format!("invalid CPU index {s} - index must be a non-negative integer"))
211    }
212
213    let (first_cpu, last_cpu) = match s.split_once('-') {
214        Some((first_cpu, last_cpu)) => {
215            let first_cpu = parse_cpu(first_cpu)?;
216            let last_cpu = parse_cpu(last_cpu)?;
217
218            if last_cpu < first_cpu {
219                return Err(format!(
220                    "invalid CPU range {s} - ranges must be from low to high"
221                ));
222            }
223            (first_cpu, last_cpu)
224        }
225        None => {
226            let cpu = parse_cpu(s)?;
227            (cpu, cpu)
228        }
229    };
230
231    cpuset.extend(first_cpu..=last_cpu);
232
233    Ok(())
234}
235
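/// Parses a `CpuSet` from a comma-separated list of CPU indices and inclusive ranges,
/// e.g. `"0,4,7,9-12"`.
///
/// Illustrative sketch (assumes this crate is available as `arch`; input is hypothetical):
///
/// ```
/// # use std::str::FromStr;
/// # use arch::CpuSet;
/// let set = CpuSet::from_str("0,4,7,9-12").unwrap();
/// assert_eq!(set, CpuSet::new(vec![0, 4, 7, 9, 10, 11, 12]));
/// ```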
236impl FromStr for CpuSet {
237    type Err = String;
238
239    fn from_str(s: &str) -> Result<Self, Self::Err> {
240        let mut cpuset = Vec::new();
241        for part in s.split(',') {
242            parse_cpu_range(part, &mut cpuset)?;
243        }
244        Ok(CpuSet::new(cpuset))
245    }
246}
247
248impl Deref for CpuSet {
249    type Target = Vec<usize>;
250
251    fn deref(&self) -> &Self::Target {
252        &self.0
253    }
254}
255
256impl IntoIterator for CpuSet {
257    type Item = usize;
258    type IntoIter = std::vec::IntoIter<Self::Item>;
259
260    fn into_iter(self) -> Self::IntoIter {
261        self.0.into_iter()
262    }
263}
264
265/// Deserializes a `CpuSet` from a sequence whose elements can be either integers or strings
266/// representing CPU ranges (e.g. `5-8`).
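///
/// Illustrative sketch using JSON (mirrors the unit tests at the bottom of this file; assumes
/// `serde_json` is available to doctests):
///
/// ```
/// # use arch::CpuSet;
/// let set: CpuSet = serde_json::from_str(r#"[0, 4, "9-12"]"#).unwrap();
/// assert_eq!(set, CpuSet::new(vec![0, 4, 9, 10, 11, 12]));
/// ```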
267impl<'de> Deserialize<'de> for CpuSet {
268    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
269    where
270        D: serde::Deserializer<'de>,
271    {
272        struct CpuSetVisitor;
273        impl<'de> Visitor<'de> for CpuSetVisitor {
274            type Value = CpuSet;
275
276            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
277                formatter.write_str("CpuSet")
278            }
279
280            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
281            where
282                A: serde::de::SeqAccess<'de>,
283            {
284                #[derive(Deserialize)]
285                #[serde(untagged)]
286                enum CpuSetValue<'a> {
287                    Single(usize),
288                    Range(&'a str),
289                }
290
291                let mut cpus = Vec::new();
292                while let Some(cpuset) = seq.next_element::<CpuSetValue>()? {
293                    match cpuset {
294                        CpuSetValue::Single(cpu) => cpus.push(cpu),
295                        CpuSetValue::Range(range) => {
296                            parse_cpu_range(range, &mut cpus).map_err(serde::de::Error::custom)?;
297                        }
298                    }
299                }
300
301                Ok(CpuSet::new(cpus))
302            }
303        }
304
305        deserializer.deserialize_seq(CpuSetVisitor)
306    }
307}
308
309/// Serializes a `CpuSet` into a sequence of integers and strings representing CPU ranges.
310impl Serialize for CpuSet {
311    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
312    where
313        S: serde::Serializer,
314    {
315        use serde::ser::SerializeSeq;
316
317        let mut seq = serializer.serialize_seq(None)?;
318
319        // Factorize ranges into "a-b" strings.
320        let mut serialize_range = |start: usize, end: usize| -> Result<(), S::Error> {
321            if start == end {
322                seq.serialize_element(&start)?;
323            } else {
324                seq.serialize_element(&format!("{start}-{end}"))?;
325            }
326
327            Ok(())
328        };
329
330        // Current range.
331        let mut range = None;
332        for core in &self.0 {
333            range = match range {
334                None => Some((core, core)),
335                Some((start, end)) if *end == *core - 1 => Some((start, core)),
336                Some((start, end)) => {
337                    serialize_range(*start, *end)?;
338                    Some((core, core))
339                }
340            };
341        }
342
343        if let Some((start, end)) = range {
344            serialize_range(*start, *end)?;
345        }
346
347        seq.end()
348    }
349}
350
351/// Mapping of guest VCPU threads to host CPU cores.
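///
/// Illustrative sketch (pinnings are hypothetical; assumes this crate is available as `arch`):
///
/// ```
/// # use std::collections::BTreeMap;
/// # use arch::{CpuSet, VcpuAffinity};
/// // Pin every vCPU thread to host cores 0-3.
/// let _global = VcpuAffinity::Global(CpuSet::new(0..=3));
/// // Pin vCPU 0 to host core 0, and vCPU 1 to host cores 2 and 3.
/// let _per_vcpu = VcpuAffinity::PerVcpu(BTreeMap::from([
///     (0, CpuSet::new([0])),
///     (1, CpuSet::new([2, 3])),
/// ]));
/// ```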
352#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
353pub enum VcpuAffinity {
354    /// All VCPU threads will be pinned to the same set of host CPU cores.
355    Global(CpuSet),
356    /// Each VCPU may be pinned to a set of host CPU cores.
357    /// The map key is a guest VCPU index, and the corresponding value is the set of
358    /// host CPU indices that the VCPU thread will be allowed to run on.
359    /// If a VCPU index is not present in the map, its affinity will not be set.
360    PerVcpu(BTreeMap<usize, CpuSet>),
361}
362
363/// Memory region with optional size.
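///
/// Illustrative key=value parsing sketch (values are hypothetical; assumes this crate is
/// available as `arch`):
///
/// ```
/// # use arch::MemoryRegionConfig;
/// let region: MemoryRegionConfig =
///     serde_keyvalue::from_key_values("start=3221225472,size=268435456").unwrap();
/// assert_eq!(region.start, 3221225472);
/// assert_eq!(region.size, Some(268435456));
/// ```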
364#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize, FromKeyValues)]
365pub struct MemoryRegionConfig {
366    pub start: u64,
367    pub size: Option<u64>,
368}
369
370/// General PCI config.
371#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize, FromKeyValues)]
372pub struct PciConfig {
373    /// Region for the PCI Configuration Access Mechanism.
374    #[cfg(target_arch = "aarch64")]
375    pub cam: Option<MemoryRegionConfig>,
376    /// Region for the PCIe Enhanced Configuration Access Mechanism.
377    #[cfg(target_arch = "x86_64")]
378    pub ecam: Option<MemoryRegionConfig>,
379    /// Region for non-prefetchable PCI device memory below 4 GiB.
380    pub mem: Option<MemoryRegionConfig>,
381}
382
383/// Holds the pieces needed to build a VM. Passed to `build_vm` in the `LinuxArch` trait below to
384/// create a `RunnableLinuxVm`.
385#[sorted]
386pub struct VmComponents {
387    #[cfg(all(target_arch = "x86_64", unix))]
388    pub ac_adapter: bool,
389    pub acpi_sdts: Vec<SDT>,
390    pub android_fstab: Option<File>,
391    pub boot_cpu: usize,
392    pub bootorder_fw_cfg_blob: Vec<u8>,
393    #[cfg(target_arch = "x86_64")]
394    pub break_linux_pci_config_io: bool,
395    pub cpu_capacity: BTreeMap<usize, u32>,
396    pub cpu_clusters: Vec<CpuSet>,
397    #[cfg(all(
398        target_arch = "aarch64",
399        any(target_os = "android", target_os = "linux")
400    ))]
401    pub cpu_frequencies: BTreeMap<usize, Vec<u32>>,
402    pub delay_rt: bool,
403    pub dynamic_power_coefficient: BTreeMap<usize, u32>,
404    pub extra_kernel_params: Vec<String>,
405    #[cfg(target_arch = "x86_64")]
406    pub force_s2idle: bool,
407    pub fw_cfg_enable: bool,
408    pub fw_cfg_parameters: Vec<FwCfgParameters>,
409    pub host_cpu_topology: bool,
410    pub hugepages: bool,
411    pub hv_cfg: hypervisor::Config,
412    pub initrd_image: Option<File>,
413    pub itmt: bool,
414    pub memory_size: u64,
415    pub no_i8042: bool,
416    pub no_rtc: bool,
417    pub no_smt: bool,
418    #[cfg(all(
419        target_arch = "aarch64",
420        any(target_os = "android", target_os = "linux")
421    ))]
422    pub normalized_cpu_ipc_ratios: BTreeMap<usize, u32>,
423    pub pci_config: PciConfig,
424    pub pflash_block_size: u32,
425    pub pflash_image: Option<File>,
426    pub pstore: Option<Pstore>,
427    /// A file to load as pVM firmware. Must be `Some` iff
428    /// `hv_cfg.protection_type == ProtectionType::UnprotectedWithFirmware`.
429    pub pvm_fw: Option<File>,
430    pub rt_cpus: CpuSet,
431    #[cfg(target_arch = "x86_64")]
432    pub smbios: SmbiosOptions,
433    #[cfg(target_arch = "aarch64")]
434    pub sve_config: SveConfig,
435    pub swiotlb: Option<u64>,
436    pub vcpu_affinity: Option<VcpuAffinity>,
437    pub vcpu_count: usize,
438    #[cfg(all(
439        target_arch = "aarch64",
440        any(target_os = "android", target_os = "linux")
441    ))]
442    pub vcpu_domain_paths: BTreeMap<usize, PathBuf>,
443    #[cfg(all(
444        target_arch = "aarch64",
445        any(target_os = "android", target_os = "linux")
446    ))]
447    pub vcpu_domains: BTreeMap<usize, u32>,
448    #[cfg(all(
449        target_arch = "aarch64",
450        any(target_os = "android", target_os = "linux")
451    ))]
452    pub virt_cpufreq_v2: bool,
453    pub vm_image: VmImage,
454}
455
456/// Holds the elements needed to run a Linux VM. Created by `build_vm`.
457#[sorted]
458pub struct RunnableLinuxVm<V: VmArch, Vcpu: VcpuArch> {
459    pub bat_control: Option<BatControl>,
460    pub delay_rt: bool,
461    pub devices_thread: Option<std::thread::JoinHandle<()>>,
462    pub hotplug_bus: BTreeMap<u8, Arc<Mutex<dyn HotPlugBus>>>,
463    pub io_bus: Arc<Bus>,
464    pub irq_chip: Box<dyn IrqChipArch>,
465    pub mmio_bus: Arc<Bus>,
466    pub no_smt: bool,
467    pub pid_debug_label_map: BTreeMap<u32, String>,
468    #[cfg(any(target_os = "android", target_os = "linux"))]
469    pub platform_devices: Vec<Arc<Mutex<dyn BusDevice>>>,
470    pub pm: Option<Arc<Mutex<dyn PmResource + Send>>>,
471    /// Devices to be notified before the system resumes from the S3 suspended state.
472    pub resume_notify_devices: Vec<Arc<Mutex<dyn BusResumeDevice>>>,
473    pub root_config: Arc<Mutex<PciRoot>>,
474    pub rt_cpus: CpuSet,
475    pub suspend_tube: (Arc<Mutex<SendTube>>, RecvTube),
476    pub vcpu_affinity: Option<VcpuAffinity>,
477    pub vcpu_count: usize,
478    pub vcpu_init: Vec<VcpuInitArch>,
479    /// If `vcpus` is `None`, then it is the responsibility of the vcpu thread to create vcpus.
480    /// If it is `Some`, then `build_vm` already created the vcpus.
481    pub vcpus: Option<Vec<Vcpu>>,
482    pub vm: V,
483    pub vm_request_tubes: Vec<Tube>,
484}
485
486/// The device and optional jail.
487pub struct VirtioDeviceStub {
488    pub dev: Box<dyn VirtioDevice>,
489    pub jail: Option<Minijail>,
490}
491
492/// Trait implemented for each Linux architecture in order to
493/// set up the memory, CPUs, and system devices and to boot the kernel.
494pub trait LinuxArch {
495    type Error: StdError;
496    type ArchMemoryLayout;
497
498    /// Decide architecture specific memory layout details to be used by later stages of the VM
499    /// setup.
500    fn arch_memory_layout(
501        components: &VmComponents,
502    ) -> std::result::Result<Self::ArchMemoryLayout, Self::Error>;
503
504    /// Returns a Vec of the valid memory address ranges as tuples of start address, length, and
505    /// region options. These should be used to configure the `GuestMemory` structure for the platform.
506    ///
507    /// # Arguments
508    ///
509    /// * `components` - Parts used to determine the memory layout.
510    fn guest_memory_layout(
511        components: &VmComponents,
512        arch_memory_layout: &Self::ArchMemoryLayout,
513        hypervisor: &impl hypervisor::Hypervisor,
514    ) -> std::result::Result<Vec<(GuestAddress, u64, MemoryRegionOptions)>, Self::Error>;
515
516    /// Gets the configuration for a new `SystemAllocator` that fits the given `Vm`'s memory layout.
517    ///
518    /// This is the per-architecture template for constructing the `SystemAllocator`. Platform
519    /// agnostic modifications may be made to this configuration, but the final `SystemAllocator`
520    /// will be at least as strict as this configuration.
521    ///
522    /// # Arguments
523    ///
524    /// * `vm` - The virtual machine to be used as a template for the `SystemAllocator`.
525    fn get_system_allocator_config<V: Vm>(
526        vm: &V,
527        arch_memory_layout: &Self::ArchMemoryLayout,
528    ) -> SystemAllocatorConfig;
529
530    /// Takes `VmComponents` and generates a `RunnableLinuxVm`.
531    ///
532    /// # Arguments
533    ///
534    /// * `components` - Parts to use to build the VM.
535    /// * `vm_evt_wrtube` - Tube used by sub-devices to request that crosvm exit because the guest
536    ///   wants to stop/shut down or has requested a reset.
537    /// * `system_allocator` - Allocator created by this trait's implementation of
538    ///   `get_system_allocator_config`.
539    /// * `serial_parameters` - Definitions for how the serial devices should be configured.
540    /// * `serial_jail` - Jail used for serial devices created here.
541    /// * `battery` - Defines what battery device will be created.
542    /// * `vm` - A VM implementation to build upon.
543    /// * `ramoops_region` - Region allocated for ramoops.
544    /// * `devices` - The devices to be built into the VM.
545    /// * `irq_chip` - The IRQ chip implementation for the VM.
546    /// * `debugcon_jail` - Jail used for debugcon devices created here.
547    /// * `pflash_jail` - Jail used for pflash device created here.
548    /// * `fw_cfg_jail` - Jail used for fw_cfg device created here.
549    /// * `device_tree_overlays` - Device tree overlay binaries.
550    fn build_vm<V, Vcpu>(
551        components: VmComponents,
552        arch_memory_layout: &Self::ArchMemoryLayout,
553        vm_evt_wrtube: &SendTube,
554        system_allocator: &mut SystemAllocator,
555        serial_parameters: &BTreeMap<(SerialHardware, u8), SerialParameters>,
556        serial_jail: Option<Minijail>,
557        battery: (Option<BatteryType>, Option<Minijail>),
558        vm: V,
559        ramoops_region: Option<pstore::RamoopsRegion>,
560        devices: Vec<(Box<dyn BusDeviceObj>, Option<Minijail>)>,
561        irq_chip: &mut dyn IrqChipArch,
562        vcpu_ids: &mut Vec<usize>,
563        dump_device_tree_blob: Option<PathBuf>,
564        debugcon_jail: Option<Minijail>,
565        #[cfg(target_arch = "x86_64")] pflash_jail: Option<Minijail>,
566        #[cfg(target_arch = "x86_64")] fw_cfg_jail: Option<Minijail>,
567        #[cfg(feature = "swap")] swap_controller: &mut Option<swap::SwapController>,
568        guest_suspended_cvar: Option<Arc<(Mutex<bool>, Condvar)>>,
569        device_tree_overlays: Vec<DtbOverlay>,
570        fdt_position: Option<FdtPosition>,
571        no_pmu: bool,
572    ) -> std::result::Result<RunnableLinuxVm<V, Vcpu>, Self::Error>
573    where
574        V: VmArch,
575        Vcpu: VcpuArch;
576
577    /// Configures the vcpu. Should be called once per vcpu from that vcpu's thread.
578    ///
579    /// # Arguments
580    ///
581    /// * `vm` - The virtual machine object.
582    /// * `hypervisor` - The `Hypervisor` that created the vcpu.
583    /// * `irq_chip` - The `IrqChip` associated with this vm.
584    /// * `vcpu` - The VCPU object to configure.
585    /// * `vcpu_init` - The data required to initialize VCPU registers and other state.
586    /// * `vcpu_id` - The id of the given `vcpu`.
587    /// * `num_cpus` - Number of virtual CPUs the guest will have.
588    /// * `cpu_config` - CPU feature configurations.
589    fn configure_vcpu<V: Vm>(
590        vm: &V,
591        hypervisor: &dyn HypervisorArch,
592        irq_chip: &mut dyn IrqChipArch,
593        vcpu: &mut dyn VcpuArch,
594        vcpu_init: VcpuInitArch,
595        vcpu_id: usize,
596        num_cpus: usize,
597        cpu_config: Option<CpuConfigArch>,
598    ) -> Result<(), Self::Error>;
599
600    /// Configures and adds a PCI device to the VM.
601    fn register_pci_device<V: VmArch, Vcpu: VcpuArch>(
602        linux: &mut RunnableLinuxVm<V, Vcpu>,
603        device: Box<dyn PciDevice>,
604        #[cfg(any(target_os = "android", target_os = "linux"))] minijail: Option<Minijail>,
605        resources: &mut SystemAllocator,
606        hp_control_tube: &mpsc::Sender<PciRootCommand>,
607        #[cfg(feature = "swap")] swap_controller: &mut Option<swap::SwapController>,
608    ) -> Result<PciAddress, Self::Error>;
609
610    /// Returns frequency map for each of the host's logical cores.
611    fn get_host_cpu_frequencies_khz() -> Result<BTreeMap<usize, Vec<u32>>, Self::Error>;
612
613    /// Returns max-freq map of the host's logical cores.
614    fn get_host_cpu_max_freq_khz() -> Result<BTreeMap<usize, u32>, Self::Error>;
615
616    /// Returns capacity map of the host's logical cores.
617    fn get_host_cpu_capacity() -> Result<BTreeMap<usize, u32>, Self::Error>;
618
619    /// Returns cluster masks for each of the host's logical cores.
620    fn get_host_cpu_clusters() -> Result<Vec<CpuSet>, Self::Error>;
621}
622
623#[cfg(feature = "gdb")]
624pub trait GdbOps<T: VcpuArch> {
625    type Error: StdError;
626
627    /// Reads vCPU's registers.
628    fn read_registers(vcpu: &T) -> Result<<GdbArch as Arch>::Registers, Self::Error>;
629
630    /// Writes vCPU's registers.
631    fn write_registers(vcpu: &T, regs: &<GdbArch as Arch>::Registers) -> Result<(), Self::Error>;
632
633    /// Reads bytes from the guest memory.
634    fn read_memory(
635        vcpu: &T,
636        guest_mem: &GuestMemory,
637        vaddr: GuestAddress,
638        len: usize,
639    ) -> Result<Vec<u8>, Self::Error>;
640
641    /// Writes bytes to the specified guest memory.
642    fn write_memory(
643        vcpu: &T,
644        guest_mem: &GuestMemory,
645        vaddr: GuestAddress,
646        buf: &[u8],
647    ) -> Result<(), Self::Error>;
648
649    /// Reads bytes from the guest register.
650    ///
651    /// Returns an empty vector if `reg_id` is valid but the register is not available.
652    fn read_register(vcpu: &T, reg_id: <GdbArch as Arch>::RegId) -> Result<Vec<u8>, Self::Error>;
653
654    /// Writes bytes to the specified guest register.
655    fn write_register(
656        vcpu: &T,
657        reg_id: <GdbArch as Arch>::RegId,
658        data: &[u8],
659    ) -> Result<(), Self::Error>;
660
661    /// Makes the vCPU's next run single-step.
662    fn enable_singlestep(vcpu: &T) -> Result<(), Self::Error>;
663
664    /// Get maximum number of hardware breakpoints.
665    fn get_max_hw_breakpoints(vcpu: &T) -> Result<usize, Self::Error>;
666
667    /// Set hardware breakpoints at the given addresses.
668    fn set_hw_breakpoints(vcpu: &T, breakpoints: &[GuestAddress]) -> Result<(), Self::Error>;
669}
670
671/// Errors for device manager.
672#[sorted]
673#[derive(Error, Debug)]
674pub enum DeviceRegistrationError {
675    /// No more MMIO space available.
676    #[error("no more addresses are available")]
677    AddrsExhausted,
678    /// Could not allocate device address space for the device.
679    #[error("Allocating device addresses: {0}")]
680    AllocateDeviceAddrs(PciDeviceError),
681    /// Could not allocate IO space for the device.
682    #[error("Allocating IO addresses: {0}")]
683    AllocateIoAddrs(PciDeviceError),
684    /// Could not allocate MMIO or IO resource for the device.
685    #[error("Allocating IO resource: {0}")]
686    AllocateIoResource(resources::Error),
687    /// Could not allocate an IRQ number.
688    #[error("Allocating IRQ number")]
689    AllocateIrq,
690    /// Could not allocate IRQ resource for the device.
691    #[cfg(any(target_os = "android", target_os = "linux"))]
692    #[error("Allocating IRQ resource: {0}")]
693    AllocateIrqResource(devices::vfio::VfioError),
694    /// Broken pci topology
695    #[error("pci topology is broken")]
696    BrokenPciTopology,
697    /// Unable to clone a jail for the device.
698    #[cfg(any(target_os = "android", target_os = "linux"))]
699    #[error("failed to clone jail: {0}")]
700    CloneJail(minijail::Error),
701    /// Appending to kernel command line failed.
702    #[error("unable to add device to kernel command line: {0}")]
703    Cmdline(kernel_cmdline::Error),
704    /// Configure window size failed.
705    #[error("failed to configure window size: {0}")]
706    ConfigureWindowSize(PciDeviceError),
707    /// Unable to create a pipe.
708    #[error("failed to create pipe: {0}")]
709    CreatePipe(base::Error),
710    /// Unable to create a PCI root.
711    #[error("failed to create pci root: {0}")]
712    CreateRoot(anyhow::Error),
713    /// Unable to create a serial device from the serial parameters.
714    #[error("failed to create serial device: {0}")]
715    CreateSerialDevice(devices::SerialError),
716    /// Unable to create a tube.
717    #[error("failed to create tube: {0}")]
718    CreateTube(base::TubeError),
719    /// Could not clone an event.
720    #[error("failed to clone event: {0}")]
721    EventClone(base::Error),
722    /// Could not create an event.
723    #[error("failed to create event: {0}")]
724    EventCreate(base::Error),
725    /// Failed to generate ACPI content.
726    #[error("failed to generate ACPI content")]
727    GenerateAcpi,
728    /// No more IRQs are available.
729    #[error("no more IRQs are available")]
730    IrqsExhausted,
731    /// VFIO device is missing a DT symbol.
732    #[error("cannot match VFIO device to DT node due to a missing symbol")]
733    MissingDeviceTreeSymbol,
734    /// Missing a required serial device.
735    #[error("missing required serial device {0}")]
736    MissingRequiredSerialDevice(u8),
737    /// Could not add a device to the mmio bus.
738    #[error("failed to add to mmio bus: {0}")]
739    MmioInsert(BusError),
740    /// Failed to insert device into PCI root.
741    #[error("failed to insert device into PCI root: {0}")]
742    PciRootAddDevice(PciDeviceError),
743    #[cfg(any(target_os = "android", target_os = "linux"))]
744    /// Failed to initialize proxy device for jailed device.
745    #[error("failed to create proxy device: {0}")]
746    ProxyDeviceCreation(devices::ProxyError),
747    #[cfg(any(target_os = "android", target_os = "linux"))]
748    /// Failed to register battery device.
749    #[error("failed to register battery device to VM: {0}")]
750    RegisterBattery(devices::BatteryError),
751    /// Could not register the PCI device with the PCI root bus.
752    #[error("failed to register PCI device to pci root bus")]
753    RegisterDevice(SendError<PciRootCommand>),
754    /// Could not register PCI device capabilities.
755    #[error("could not register PCI device capabilities: {0}")]
756    RegisterDeviceCapabilities(PciDeviceError),
757    /// Failed to register ioevent with VM.
758    #[error("failed to register ioevent to VM: {0}")]
759    RegisterIoevent(base::Error),
760    /// Failed to register irq event with VM.
761    #[error("failed to register irq event to VM: {0}")]
762    RegisterIrqfd(base::Error),
763    /// Could not setup VFIO platform IRQ for the device.
764    #[error("Setting up VFIO platform IRQ: {0}")]
765    SetupVfioPlatformIrq(anyhow::Error),
766}
767
768/// Configures a PCI device for use by this VM.
769pub fn configure_pci_device<V: VmArch, Vcpu: VcpuArch>(
770    linux: &mut RunnableLinuxVm<V, Vcpu>,
771    mut device: Box<dyn PciDevice>,
772    #[cfg(any(target_os = "android", target_os = "linux"))] jail: Option<Minijail>,
773    resources: &mut SystemAllocator,
774    hp_control_tube: &mpsc::Sender<PciRootCommand>,
775    #[cfg(feature = "swap")] swap_controller: &mut Option<swap::SwapController>,
776) -> Result<PciAddress, DeviceRegistrationError> {
777    // Allocate PCI device address before allocating BARs.
778    let pci_address = device
779        .allocate_address(resources)
780        .map_err(DeviceRegistrationError::AllocateDeviceAddrs)?;
781
782    // Allocate ranges that may need to be in the low MMIO region (MmioType::Low).
783    let mmio_ranges = device
784        .allocate_io_bars(resources)
785        .map_err(DeviceRegistrationError::AllocateIoAddrs)?;
786
787    // Allocate device ranges that may be in low or high MMIO after low-only ranges.
788    let device_ranges = device
789        .allocate_device_bars(resources)
790        .map_err(DeviceRegistrationError::AllocateDeviceAddrs)?;
791
792    // If the device is a PCIe bridge, add its PCI bus to the PCI root.
793    if let Some(pci_bus) = device.get_new_pci_bus() {
794        hp_control_tube
795            .send(PciRootCommand::AddBridge(pci_bus))
796            .map_err(DeviceRegistrationError::RegisterDevice)?;
797        let bar_ranges = Vec::new();
798        device
799            .configure_bridge_window(resources, &bar_ranges)
800            .map_err(DeviceRegistrationError::ConfigureWindowSize)?;
801    }
802
803    // Do not suggest INTx for hot-plug devices.
804    let intx_event = devices::IrqLevelEvent::new().map_err(DeviceRegistrationError::EventCreate)?;
805
806    if let PreferredIrq::Fixed { pin, gsi } = device.preferred_irq() {
807        resources.reserve_irq(gsi);
808
809        device.assign_irq(
810            intx_event
811                .try_clone()
812                .map_err(DeviceRegistrationError::EventClone)?,
813            pin,
814            gsi,
815        );
816
817        linux
818            .irq_chip
819            .as_irq_chip_mut()
820            .register_level_irq_event(gsi, &intx_event, IrqEventSource::from_device(&device))
821            .map_err(DeviceRegistrationError::RegisterIrqfd)?;
822    }
823
824    let mut keep_rds = device.keep_rds();
825    syslog::push_descriptors(&mut keep_rds);
826    cros_tracing::push_descriptors!(&mut keep_rds);
827    metrics::push_descriptors(&mut keep_rds);
828
829    device
830        .register_device_capabilities()
831        .map_err(DeviceRegistrationError::RegisterDeviceCapabilities)?;
832
833    #[cfg(any(target_os = "android", target_os = "linux"))]
834    let arced_dev: Arc<Mutex<dyn BusDevice>> = if let Some(jail) = jail {
835        let proxy = ProxyDevice::new(
836            device,
837            jail,
838            keep_rds,
839            #[cfg(feature = "swap")]
840            swap_controller,
841        )
842        .map_err(DeviceRegistrationError::ProxyDeviceCreation)?;
843        linux
844            .pid_debug_label_map
845            .insert(proxy.pid() as u32, proxy.debug_label());
846        Arc::new(Mutex::new(proxy))
847    } else {
848        device.on_sandboxed();
849        Arc::new(Mutex::new(device))
850    };
851
852    #[cfg(windows)]
853    let arced_dev = {
854        device.on_sandboxed();
855        Arc::new(Mutex::new(device))
856    };
857
858    #[cfg(any(target_os = "android", target_os = "linux"))]
859    hp_control_tube
860        .send(PciRootCommand::Add(pci_address, arced_dev.clone()))
861        .map_err(DeviceRegistrationError::RegisterDevice)?;
862
863    for range in &mmio_ranges {
864        linux
865            .mmio_bus
866            .insert(arced_dev.clone(), range.addr, range.size)
867            .map_err(DeviceRegistrationError::MmioInsert)?;
868    }
869
870    for range in &device_ranges {
871        linux
872            .mmio_bus
873            .insert(arced_dev.clone(), range.addr, range.size)
874            .map_err(DeviceRegistrationError::MmioInsert)?;
875    }
876
877    Ok(pci_address)
878}
879
880// Generates the PCI topology starting from the given parent bus.
881fn generate_pci_topology(
882    parent_bus: Arc<Mutex<PciBus>>,
883    resources: &mut SystemAllocator,
884    io_ranges: &mut BTreeMap<usize, Vec<BarRange>>,
885    device_ranges: &mut BTreeMap<usize, Vec<BarRange>>,
886    device_addrs: &[PciAddress],
887    devices: &mut Vec<(Box<dyn PciDevice>, Option<Minijail>)>,
888) -> Result<(Vec<BarRange>, u8), DeviceRegistrationError> {
889    let mut bar_ranges = Vec::new();
890    let bus_num = parent_bus.lock().get_bus_num();
891    let mut subordinate_bus = bus_num;
892    for (dev_idx, addr) in device_addrs.iter().enumerate() {
893        // Only consider devices located on this bus.
894        if addr.bus == bus_num {
895            // If this device is a PCI bridge (i.e., it has a PCI bus structure),
896            // create its topology recursively.
897            if let Some(child_bus) = devices[dev_idx].0.get_new_pci_bus() {
898                let (child_bar_ranges, child_sub_bus) = generate_pci_topology(
899                    child_bus.clone(),
900                    resources,
901                    io_ranges,
902                    device_ranges,
903                    device_addrs,
904                    devices,
905                )?;
906                let device = &mut devices[dev_idx].0;
907                parent_bus
908                    .lock()
909                    .add_child_bus(child_bus.clone())
910                    .map_err(|_| DeviceRegistrationError::BrokenPciTopology)?;
911                let bridge_window = device
912                    .configure_bridge_window(resources, &child_bar_ranges)
913                    .map_err(DeviceRegistrationError::ConfigureWindowSize)?;
914                bar_ranges.extend(bridge_window);
915
916                let ranges = device
917                    .allocate_io_bars(resources)
918                    .map_err(DeviceRegistrationError::AllocateIoAddrs)?;
919                io_ranges.insert(dev_idx, ranges.clone());
920                bar_ranges.extend(ranges);
921
922                let ranges = device
923                    .allocate_device_bars(resources)
924                    .map_err(DeviceRegistrationError::AllocateDeviceAddrs)?;
925                device_ranges.insert(dev_idx, ranges.clone());
926                bar_ranges.extend(ranges);
927
928                device.set_subordinate_bus(child_sub_bus);
929
930                subordinate_bus = std::cmp::max(subordinate_bus, child_sub_bus);
931            }
932        }
933    }
934
935    for (dev_idx, addr) in device_addrs.iter().enumerate() {
936        if addr.bus == bus_num {
937            let device = &mut devices[dev_idx].0;
938            // Allocate MMIO for non-bridge devices
939            if device.get_new_pci_bus().is_none() {
940                let ranges = device
941                    .allocate_io_bars(resources)
942                    .map_err(DeviceRegistrationError::AllocateIoAddrs)?;
943                io_ranges.insert(dev_idx, ranges.clone());
944                bar_ranges.extend(ranges);
945
946                let ranges = device
947                    .allocate_device_bars(resources)
948                    .map_err(DeviceRegistrationError::AllocateDeviceAddrs)?;
949                device_ranges.insert(dev_idx, ranges.clone());
950                bar_ranges.extend(ranges);
951            }
952        }
953    }
954    Ok((bar_ranges, subordinate_bus))
955}
956
957/// Ensure all PCI devices have an assigned PCI address.
958pub fn assign_pci_addresses(
959    devices: &mut [(Box<dyn BusDeviceObj>, Option<Minijail>)],
960    resources: &mut SystemAllocator,
961) -> Result<(), DeviceRegistrationError> {
962    // First allocate devices with a preferred address.
963    for pci_device in devices
964        .iter_mut()
965        .filter_map(|(device, _jail)| device.as_pci_device_mut())
966        .filter(|pci_device| pci_device.preferred_address().is_some())
967    {
968        let _ = pci_device
969            .allocate_address(resources)
970            .map_err(DeviceRegistrationError::AllocateDeviceAddrs)?;
971    }
972
973    // Then allocate addresses for the remaining devices.
974    for pci_device in devices
975        .iter_mut()
976        .filter_map(|(device, _jail)| device.as_pci_device_mut())
977        .filter(|pci_device| pci_device.preferred_address().is_none())
978    {
979        let _ = pci_device
980            .allocate_address(resources)
981            .map_err(DeviceRegistrationError::AllocateDeviceAddrs)?;
982    }
983
984    Ok(())
985}
986
987/// Creates a root PCI device for use by this Vm.
988pub fn generate_pci_root(
989    mut devices: Vec<(Box<dyn PciDevice>, Option<Minijail>)>,
990    irq_chip: &mut dyn IrqChip,
991    mmio_bus: Arc<Bus>,
992    mmio_base: GuestAddress,
993    mmio_register_bit_num: usize,
994    io_bus: Arc<Bus>,
995    resources: &mut SystemAllocator,
996    vm: &mut impl Vm,
997    max_irqs: usize,
998    vcfg_base: Option<u64>,
999    #[cfg(feature = "swap")] swap_controller: &mut Option<swap::SwapController>,
1000) -> Result<
1001    (
1002        PciRoot,
1003        Vec<(PciAddress, u32, PciInterruptPin)>,
1004        BTreeMap<u32, String>,
1005        BTreeMap<PciAddress, Vec<u8>>,
1006        BTreeMap<PciAddress, Vec<u8>>,
1007    ),
1008    DeviceRegistrationError,
1009> {
1010    let mut device_addrs = Vec::new();
1011
1012    for (device, _jail) in devices.iter_mut() {
1013        let address = device
1014            .allocate_address(resources)
1015            .map_err(DeviceRegistrationError::AllocateDeviceAddrs)?;
1016        device_addrs.push(address);
1017    }
1018
1019    let mut device_ranges = BTreeMap::new();
1020    let mut io_ranges = BTreeMap::new();
1021    let root_bus = Arc::new(Mutex::new(PciBus::new(0, 0, false)));
1022
1023    generate_pci_topology(
1024        root_bus.clone(),
1025        resources,
1026        &mut io_ranges,
1027        &mut device_ranges,
1028        &device_addrs,
1029        &mut devices,
1030    )?;
1031
1032    let mut root = PciRoot::new(
1033        vm,
1034        Arc::downgrade(&mmio_bus),
1035        mmio_base,
1036        mmio_register_bit_num,
1037        Arc::downgrade(&io_bus),
1038        root_bus,
1039    )
1040    .map_err(DeviceRegistrationError::CreateRoot)?;
1041    #[cfg_attr(windows, allow(unused_mut))]
1042    let mut pid_labels = BTreeMap::new();
1043
1044    // Allocate legacy INTx
1045    let mut pci_irqs = Vec::new();
1046    let mut irqs: Vec<u32> = Vec::new();
1047
1048    // Mapping of (bus, dev, pin) -> IRQ number.
1049    let mut dev_pin_irq = BTreeMap::new();
1050
1051    for (dev_idx, (device, _jail)) in devices.iter_mut().enumerate() {
1052        let pci_address = device_addrs[dev_idx];
1053
1054        let irq = match device.preferred_irq() {
1055            PreferredIrq::Fixed { pin, gsi } => {
1056                // The device reported a preferred IRQ, so use that rather than allocating one.
1057                resources.reserve_irq(gsi);
1058                Some((pin, gsi))
1059            }
1060            PreferredIrq::Any => {
1061                // The device did not provide a preferred IRQ but requested one, so allocate one.
1062
1063                // Choose a pin based on the slot's function number. Function 0 must always use
1064                // INTA# for single-function devices per the PCI spec, and we choose to use INTA#
1065                // for function 0 on multifunction devices and distribute the remaining functions
1066                // evenly across the other pins.
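                // For example (illustrative): functions 0 and 4 map to INTA#, 1 and 5 to INTB#,
                // 2 and 6 to INTC#, and 3 and 7 to INTD#.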
1067                let pin = match pci_address.func % 4 {
1068                    0 => PciInterruptPin::IntA,
1069                    1 => PciInterruptPin::IntB,
1070                    2 => PciInterruptPin::IntC,
1071                    _ => PciInterruptPin::IntD,
1072                };
1073
1074                // If an IRQ number has already been assigned for a different function with this
1075                // (bus, device, pin) combination, use it. Otherwise allocate a new one and insert
1076                // it into the map.
1077                let pin_key = (pci_address.bus, pci_address.dev, pin);
1078                let irq_num = if let Some(irq_num) = dev_pin_irq.get(&pin_key) {
1079                    *irq_num
1080                } else {
1081                    // If we have allocated fewer than `max_irqs` total, add a new irq to the `irqs`
1082                    // pool. Otherwise, share one of the existing `irqs`.
1083                    let irq_num = if irqs.len() < max_irqs {
1084                        let irq_num = resources
1085                            .allocate_irq()
1086                            .ok_or(DeviceRegistrationError::AllocateIrq)?;
1087                        irqs.push(irq_num);
1088                        irq_num
1089                    } else {
1090                        // Pick one of the existing IRQs to share, using `dev_idx` to distribute IRQ
1091                        // sharing evenly across devices.
1092                        irqs[dev_idx % max_irqs]
1093                    };
1094
1095                    dev_pin_irq.insert(pin_key, irq_num);
1096                    irq_num
1097                };
1098                Some((pin, irq_num))
1099            }
1100            PreferredIrq::None => {
1101                // The device does not want an INTx# IRQ.
1102                None
1103            }
1104        };
1105
1106        if let Some((pin, gsi)) = irq {
1107            let intx_event =
1108                devices::IrqLevelEvent::new().map_err(DeviceRegistrationError::EventCreate)?;
1109
1110            device.assign_irq(
1111                intx_event
1112                    .try_clone()
1113                    .map_err(DeviceRegistrationError::EventClone)?,
1114                pin,
1115                gsi,
1116            );
1117
1118            irq_chip
1119                .register_level_irq_event(gsi, &intx_event, IrqEventSource::from_device(device))
1120                .map_err(DeviceRegistrationError::RegisterIrqfd)?;
1121
1122            pci_irqs.push((pci_address, gsi, pin));
1123        }
1124    }
1125
1126    // To prevent issues where a device's `on_sandboxed` hook may spawn a thread before all
1127    // sandboxed devices are sandboxed, partition the iterator so that sandboxed devices are
1128    // processed first. This is needed on Linux platforms. On Windows this is a no-op, since
1129    // jails are always `None`, even for sandboxed devices.
1130    let devices = {
1131        let (sandboxed, non_sandboxed): (Vec<_>, Vec<_>) = devices
1132            .into_iter()
1133            .enumerate()
1134            .partition(|(_, (_, jail))| jail.is_some());
1135        sandboxed.into_iter().chain(non_sandboxed)
1136    };
1137
1138    let mut amls = BTreeMap::new();
1139    let mut gpe_scope_amls = BTreeMap::new();
1140    for (dev_idx, dev_value) in devices {
1141        #[cfg(any(target_os = "android", target_os = "linux"))]
1142        let (mut device, jail) = dev_value;
1143        #[cfg(windows)]
1144        let (mut device, _) = dev_value;
1145        let address = device_addrs[dev_idx];
1146
1147        let mut keep_rds = device.keep_rds();
1148        syslog::push_descriptors(&mut keep_rds);
1149        cros_tracing::push_descriptors!(&mut keep_rds);
1150        metrics::push_descriptors(&mut keep_rds);
1151        keep_rds.append(&mut vm.get_memory().as_raw_descriptors());
1152
1153        let ranges = io_ranges.remove(&dev_idx).unwrap_or_default();
1154        let device_ranges = device_ranges.remove(&dev_idx).unwrap_or_default();
1155        device
1156            .register_device_capabilities()
1157            .map_err(DeviceRegistrationError::RegisterDeviceCapabilities)?;
1158
1159        if let Some(vcfg_base) = vcfg_base {
1160            let (methods, shm) = device.generate_acpi_methods();
1161            if !methods.is_empty() {
1162                amls.insert(address, methods);
1163            }
1164            if let Some((offset, mmap)) = shm {
1165                let _ = vm.add_memory_region(
1166                    GuestAddress(vcfg_base + offset as u64),
1167                    Box::new(mmap),
1168                    false,
1169                    false,
1170                    MemCacheType::CacheCoherent,
1171                );
1172            }
1173        }
1174        let gpe_nr = device.set_gpe(resources);
1175
1176        #[cfg(any(target_os = "android", target_os = "linux"))]
1177        let arced_dev: Arc<Mutex<dyn BusDevice>> = if let Some(jail) = jail {
1178            let proxy = ProxyDevice::new(
1179                device,
1180                jail,
1181                keep_rds,
1182                #[cfg(feature = "swap")]
1183                swap_controller,
1184            )
1185            .map_err(DeviceRegistrationError::ProxyDeviceCreation)?;
1186            pid_labels.insert(proxy.pid() as u32, proxy.debug_label());
1187            Arc::new(Mutex::new(proxy))
1188        } else {
1189            device.on_sandboxed();
1190            Arc::new(Mutex::new(device))
1191        };
1192        #[cfg(windows)]
1193        let arced_dev = {
1194            device.on_sandboxed();
1195            Arc::new(Mutex::new(device))
1196        };
1197        root.add_device(address, arced_dev.clone(), vm)
1198            .map_err(DeviceRegistrationError::PciRootAddDevice)?;
1199        for range in &ranges {
1200            mmio_bus
1201                .insert(arced_dev.clone(), range.addr, range.size)
1202                .map_err(DeviceRegistrationError::MmioInsert)?;
1203        }
1204
1205        for range in &device_ranges {
1206            mmio_bus
1207                .insert(arced_dev.clone(), range.addr, range.size)
1208                .map_err(DeviceRegistrationError::MmioInsert)?;
1209        }
1210
1211        if let Some(gpe_nr) = gpe_nr {
1212            if let Some(acpi_path) = root.acpi_path(&address) {
1213                let mut gpe_aml = Vec::new();
1214
1215                GpeScope {}.cast_to_aml_bytes(
1216                    &mut gpe_aml,
1217                    gpe_nr,
1218                    format!("\\{acpi_path}").as_str(),
1219                );
1220                if !gpe_aml.is_empty() {
1221                    gpe_scope_amls.insert(address, gpe_aml);
1222                }
1223            }
1224        }
1225    }
1226
1227    Ok((root, pci_irqs, pid_labels, amls, gpe_scope_amls))
1228}
1229
1230/// Errors for image loading.
1231#[sorted]
1232#[derive(Error, Debug)]
1233pub enum LoadImageError {
1234    #[error("Alignment not a power of two: {0}")]
1235    BadAlignment(u64),
1236    #[error("Getting image size failed: {0}")]
1237    GetLen(io::Error),
1238    #[error("GuestMemory get slice failed: {0}")]
1239    GuestMemorySlice(GuestMemoryError),
1240    #[error("Image size too large: {0}")]
1241    ImageSizeTooLarge(u64),
1242    #[error("No suitable memory region found")]
1243    NoSuitableMemoryRegion,
1244    #[error("Reading image into memory failed: {0}")]
1245    ReadToMemory(io::Error),
1246    #[error("Cannot load zero-sized image")]
1247    ZeroSizedImage,
1248}
1249
1250/// Load an image from a file into guest memory.
1251///
1252/// # Arguments
1253///
1254/// * `guest_mem` - The memory to be used by the guest.
1255/// * `guest_addr` - The starting address to load the image in the guest memory.
1256/// * `max_size` - The amount of space in bytes available in the guest memory for the image.
1257/// * `image` - The file containing the image to be loaded.
1258///
1259/// The size in bytes of the loaded image is returned.
1260pub fn load_image<F>(
1261    guest_mem: &GuestMemory,
1262    image: &mut F,
1263    guest_addr: GuestAddress,
1264    max_size: u64,
1265) -> Result<usize, LoadImageError>
1266where
1267    F: FileReadWriteAtVolatile + FileGetLen,
1268{
1269    let size = image.get_len().map_err(LoadImageError::GetLen)?;
1270
1271    if size > usize::MAX as u64 || size > max_size {
1272        return Err(LoadImageError::ImageSizeTooLarge(size));
1273    }
1274
1275    // This is safe due to the bounds check above.
1276    let size = size as usize;
1277
1278    let guest_slice = guest_mem
1279        .get_slice_at_addr(guest_addr, size)
1280        .map_err(LoadImageError::GuestMemorySlice)?;
1281    image
1282        .read_exact_at_volatile(guest_slice, 0)
1283        .map_err(LoadImageError::ReadToMemory)?;
1284
1285    Ok(size)
1286}
1287
1288/// Load an image from a file into guest memory at the highest possible address.
1289///
1290/// # Arguments
1291///
1292/// * `guest_mem` - The memory to be used by the guest.
1293/// * `image` - The file containing the image to be loaded.
1294/// * `min_guest_addr` - The minimum address of the start of the image.
1295/// * `max_guest_addr` - The maximum address for the last byte of the image.
1296/// * `region_filter` - The optional filter function for determining if the given guest memory
1297///   region is suitable for loading the image into it.
1298/// * `align` - The minimum alignment of the start address of the image in bytes (must be a power of
1299///   two).
1300///
1301/// The guest address and size in bytes of the loaded image are returned.
1302pub fn load_image_high<F>(
1303    guest_mem: &GuestMemory,
1304    image: &mut F,
1305    min_guest_addr: GuestAddress,
1306    max_guest_addr: GuestAddress,
1307    region_filter: Option<fn(&MemoryRegionInformation) -> bool>,
1308    align: u64,
1309) -> Result<(GuestAddress, usize), LoadImageError>
1310where
1311    F: FileReadWriteAtVolatile + FileGetLen,
1312{
1313    if !align.is_power_of_two() {
1314        return Err(LoadImageError::BadAlignment(align));
1315    }
1316
1317    let max_size = max_guest_addr.offset_from(min_guest_addr) & !(align - 1);
1318    let size = image.get_len().map_err(LoadImageError::GetLen)?;
1319
1320    if size == 0 {
1321        return Err(LoadImageError::ZeroSizedImage);
1322    }
1323
1324    if size > usize::MAX as u64 || size > max_size {
1325        return Err(LoadImageError::ImageSizeTooLarge(size));
1326    }
1327
1328    // Sort the list of guest memory regions by address so we can iterate over them in reverse order
1329    // (high to low).
1330    let mut regions: Vec<_> = guest_mem
1331        .regions()
1332        .filter(region_filter.unwrap_or(|_| true))
1333        .collect();
1334    regions.sort_unstable_by(|a, b| a.guest_addr.cmp(&b.guest_addr));
1335
1336    // Find the highest valid address inside a guest memory region that satisfies the requested
1337    // alignment and min/max address requirements while having enough space for the image.
1338    let guest_addr = regions
1339        .into_iter()
1340        .rev()
1341        .filter_map(|r| {
1342            // Highest address within this region.
1343            let rgn_max_addr = r
1344                .guest_addr
1345                .checked_add((r.size as u64).checked_sub(1)?)?
1346                .min(max_guest_addr);
1347            // Lowest aligned address within this region.
1348            let rgn_start_aligned = r.guest_addr.align(align)?;
1349            // Hypothetical address of the image if loaded at the end of the region.
1350            let image_addr = rgn_max_addr.checked_sub(size - 1)? & !(align - 1);
1351
1352            // Would the image fit within the region?
1353            if image_addr >= rgn_start_aligned {
1354                Some(image_addr)
1355            } else {
1356                None
1357            }
1358        })
1359        .find(|&addr| addr >= min_guest_addr)
1360        .ok_or(LoadImageError::NoSuitableMemoryRegion)?;
1361
1362    // This is safe due to the bounds check above.
1363    let size = size as usize;
1364
1365    let guest_slice = guest_mem
1366        .get_slice_at_addr(guest_addr, size)
1367        .map_err(LoadImageError::GuestMemorySlice)?;
1368    image
1369        .read_exact_at_volatile(guest_slice, 0)
1370        .map_err(LoadImageError::ReadToMemory)?;
1371
1372    Ok((guest_addr, size))
1373}
1374
1375/// SMBIOS table configuration.
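///
/// Illustrative key=value parsing sketch (values are hypothetical; assumes this crate is
/// available as `arch`):
///
/// ```
/// # use arch::SmbiosOptions;
/// let opts: SmbiosOptions =
///     serde_keyvalue::from_key_values("bios-vendor=crosvm,manufacturer=ACME").unwrap();
/// assert_eq!(opts.bios_vendor.as_deref(), Some("crosvm"));
/// assert_eq!(opts.manufacturer.as_deref(), Some("ACME"));
/// ```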
1376#[derive(Clone, Debug, Default, Serialize, Deserialize, FromKeyValues, PartialEq, Eq)]
1377#[serde(deny_unknown_fields, rename_all = "kebab-case")]
1378pub struct SmbiosOptions {
1379    /// BIOS vendor name.
1380    pub bios_vendor: Option<String>,
1381
1382    /// BIOS version number (free-form string).
1383    pub bios_version: Option<String>,
1384
1385    /// System manufacturer name.
1386    pub manufacturer: Option<String>,
1387
1388    /// System product name.
1389    pub product_name: Option<String>,
1390
1391    /// System serial number (free-form string).
1392    pub serial_number: Option<String>,
1393
1394    /// System UUID.
1395    pub uuid: Option<Uuid>,
1396
1397    /// Additional OEM strings to add to SMBIOS table.
1398    #[serde(default)]
1399    pub oem_strings: Vec<String>,
1400}
1401
1402#[cfg(test)]
1403mod tests {
1404    use serde_keyvalue::from_key_values;
1405    use tempfile::tempfile;
1406
1407    use super::*;
1408
1409    #[test]
1410    fn parse_pstore() {
1411        let res: Pstore = from_key_values("path=/some/path,size=16384").unwrap();
1412        assert_eq!(
1413            res,
1414            Pstore {
1415                path: "/some/path".into(),
1416                size: 16384,
1417            }
1418        );
1419
1420        let res = from_key_values::<Pstore>("path=/some/path");
1421        assert!(res.is_err());
1422
1423        let res = from_key_values::<Pstore>("size=16384");
1424        assert!(res.is_err());
1425
1426        let res = from_key_values::<Pstore>("");
1427        assert!(res.is_err());
1428    }
1429
1430    #[test]
1431    fn deserialize_cpuset_serde_kv() {
1432        let res: CpuSet = from_key_values("[0,4,7]").unwrap();
1433        assert_eq!(res, CpuSet::new(vec![0, 4, 7]));
1434
1435        let res: CpuSet = from_key_values("[9-12]").unwrap();
1436        assert_eq!(res, CpuSet::new(vec![9, 10, 11, 12]));
1437
1438        let res: CpuSet = from_key_values("[0,4,7,9-12,15]").unwrap();
1439        assert_eq!(res, CpuSet::new(vec![0, 4, 7, 9, 10, 11, 12, 15]));
1440    }
1441
1442    #[test]
1443    fn deserialize_serialize_cpuset_json() {
1444        let json_str = "[0,4,7]";
1445        let cpuset = CpuSet::new(vec![0, 4, 7]);
1446        let res: CpuSet = serde_json::from_str(json_str).unwrap();
1447        assert_eq!(res, cpuset);
1448        assert_eq!(serde_json::to_string(&cpuset).unwrap(), json_str);
1449
1450        let json_str = r#"["9-12"]"#;
1451        let cpuset = CpuSet::new(vec![9, 10, 11, 12]);
1452        let res: CpuSet = serde_json::from_str(json_str).unwrap();
1453        assert_eq!(res, cpuset);
1454        assert_eq!(serde_json::to_string(&cpuset).unwrap(), json_str);
1455
1456        let json_str = r#"[0,4,7,"9-12",15]"#;
1457        let cpuset = CpuSet::new(vec![0, 4, 7, 9, 10, 11, 12, 15]);
1458        let res: CpuSet = serde_json::from_str(json_str).unwrap();
1459        assert_eq!(res, cpuset);
1460        assert_eq!(serde_json::to_string(&cpuset).unwrap(), json_str);
1461    }
1462
1463    #[test]
1464    fn load_image_high_max_4g() {
1465        let mem = GuestMemory::new(&[
1466            (GuestAddress(0x0000_0000), 0x4000_0000), // 0x00000000..0x40000000
1467            (GuestAddress(0x8000_0000), 0x4000_0000), // 0x80000000..0xC0000000
1468        ])
1469        .unwrap();
1470
1471        const TEST_IMAGE_SIZE: u64 = 1234;
1472        let mut test_image = tempfile().unwrap();
1473        test_image.set_len(TEST_IMAGE_SIZE).unwrap();
1474
1475        const TEST_ALIGN: u64 = 0x8000;
1476        let (addr, size) = load_image_high(
1477            &mem,
1478            &mut test_image,
1479            GuestAddress(0x8000),
1480            GuestAddress(0xFFFF_FFFF), // max_guest_addr beyond highest guest memory region
1481            None,
1482            TEST_ALIGN,
1483        )
1484        .unwrap();
1485
1486        assert_eq!(addr, GuestAddress(0xBFFF_8000));
1487        assert_eq!(addr.offset() % TEST_ALIGN, 0);
1488        assert_eq!(size, TEST_IMAGE_SIZE as usize);
1489    }
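
    // Illustrative sketch of basic `load_image` usage with a small temporary file; the sizes
    // are hypothetical and mirror the style of the test above.
    #[test]
    fn load_image_basic() {
        let mem = GuestMemory::new(&[(GuestAddress(0), 0x10_0000)]).unwrap();

        const TEST_IMAGE_SIZE: u64 = 4096;
        let mut test_image = tempfile().unwrap();
        test_image.set_len(TEST_IMAGE_SIZE).unwrap();

        let size = load_image(&mem, &mut test_image, GuestAddress(0x1000), 0x8000).unwrap();
        assert_eq!(size, TEST_IMAGE_SIZE as usize);
    }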
1490}