1use std::arch::x86_64::CpuidResult;
6use std::arch::x86_64::__cpuid;
7use std::arch::x86_64::__cpuid_count;
8use std::cmp;
9use std::result;
10
11use devices::Apic;
12use devices::IrqChipCap;
13use devices::IrqChipX86_64;
14use hypervisor::CpuConfigX86_64;
15use hypervisor::CpuHybridType;
16use hypervisor::CpuIdEntry;
17use hypervisor::HypervisorCap;
18use hypervisor::HypervisorX86_64;
19use hypervisor::VcpuX86_64;
20use remain::sorted;
21use thiserror::Error;
22
23use crate::CpuManufacturer;
24
/// Errors that can occur while building or installing a vCPU's CPUID set.
///
/// Variants are kept in alphabetical order, enforced at compile time by the
/// `remain` crate's `#[sorted]` attribute.
#[sorted]
#[derive(Error, Debug, PartialEq, Eq)]
pub enum Error {
    /// `get_supported_cpuid()` on the hypervisor failed.
    #[error("GetSupportedCpus ioctl failed: {0}")]
    GetSupportedCpusFailed(base::Error),
    /// `set_cpuid()` on the vCPU failed.
    #[error("SetSupportedCpus ioctl failed: {0}")]
    SetSupportedCpusFailed(base::Error),
}
33
/// Module-local result type: all fallible operations here fail with [`Error`].
pub type Result<T> = result::Result<T, Error>;
35
36pub const EBX_CLFLUSH_CACHELINE: u32 = 8; pub const EBX_CLFLUSH_SIZE_SHIFT: u32 = 8; pub const EBX_CPU_COUNT_SHIFT: u32 = 16; pub const EBX_CPUID_SHIFT: u32 = 24; pub const ECX_EPB_SHIFT: u32 = 3; pub const ECX_X2APIC_SHIFT: u32 = 21; pub const ECX_TSC_DEADLINE_TIMER_SHIFT: u32 = 24; pub const ECX_HYPERVISOR_SHIFT: u32 = 31; pub const EDX_HTT_SHIFT: u32 = 28; pub const ECX_TOPO_TYPE_SHIFT: u32 = 8; pub const ECX_TOPO_SMT_TYPE: u32 = 1; pub const ECX_TOPO_CORE_TYPE: u32 = 2; pub const ECX_HCFC_PERF_SHIFT: u32 = 0; pub const EAX_CPU_CORES_SHIFT: u32 = 26; pub const EDX_HYBRID_CPU_SHIFT: u32 = 15; pub const EAX_HWP_SHIFT: u32 = 7; pub const EAX_HWP_NOTIFICATION_SHIFT: u32 = 8; pub const EAX_HWP_EPP_SHIFT: u32 = 10; pub const EAX_ITMT_SHIFT: u32 = 14; pub const EAX_CORE_TEMP: u32 = 0; pub const EAX_PKG_TEMP: u32 = 6; pub const EAX_CORE_TYPE_SHIFT: u32 = 24; const EAX_CORE_TYPE_ATOM: u32 = 0x20; const EAX_CORE_TYPE_CORE: u32 = 0x40; #[derive(Clone, Debug, PartialEq, Eq)]
65pub struct CpuIdContext {
66 vcpu_id: usize,
68 cpu_count: usize,
70 x2apic: bool,
72 tsc_deadline_timer: bool,
74 apic_frequency: u32,
76 tsc_frequency: Option<u64>,
78 cpu_config: CpuConfigX86_64,
80 cpuid_count: unsafe fn(u32, u32) -> CpuidResult,
82 cpuid: unsafe fn(u32) -> CpuidResult,
84}
85
86impl CpuIdContext {
87 pub fn new(
88 vcpu_id: usize,
89 cpu_count: usize,
90 irq_chip: Option<&dyn IrqChipX86_64>,
91 cpu_config: CpuConfigX86_64,
92 calibrated_tsc_leaf_required: bool,
93 cpuid_count: unsafe fn(u32, u32) -> CpuidResult,
94 cpuid: unsafe fn(u32) -> CpuidResult,
95 ) -> CpuIdContext {
96 CpuIdContext {
97 vcpu_id,
98 cpu_count,
99 x2apic: irq_chip.is_some_and(|chip| chip.check_capability(IrqChipCap::X2Apic)),
100 tsc_deadline_timer: irq_chip
101 .is_some_and(|chip| chip.check_capability(IrqChipCap::TscDeadlineTimer)),
102 apic_frequency: irq_chip.map_or(Apic::frequency(), |chip| chip.lapic_frequency()),
103 tsc_frequency: if calibrated_tsc_leaf_required || cpu_config.force_calibrated_tsc_leaf {
104 devices::tsc::tsc_frequency().ok()
105 } else {
106 None
107 },
108 cpu_config,
109 cpuid_count,
110 cpuid,
111 }
112 }
113}
114
/// Adjusts a single CPUID `entry` in place so the values reported to the
/// guest match the virtual CPU described by `ctx`.
///
/// Leaves whose function number is not matched below pass through unchanged.
/// Leaf semantics follow the Intel SDM Vol. 2A CPUID documentation.
pub fn adjust_cpuid(entry: &mut CpuIdEntry, ctx: &CpuIdContext) {
    match entry.function {
        0 => {
            // Leaf 0 EAX is the highest supported basic leaf. If we intend to
            // expose a calibrated TSC leaf (0x15), raise the limit so the
            // guest will actually query it.
            if ctx.tsc_frequency.is_some() {
                entry.cpuid.eax = cmp::max(0x15, entry.cpuid.eax);
            }
        }
        1 => {
            // Leaf 1: feature flags and basic CPU identification.
            if entry.index == 0 {
                // Always advertise that the guest is running under a
                // hypervisor.
                entry.cpuid.ecx |= 1 << ECX_HYPERVISOR_SHIFT;
            }
            // Reflect whether the irqchip can emulate x2APIC; clear the bit
            // explicitly when it can't, since the host value may have it set.
            if ctx.x2apic {
                entry.cpuid.ecx |= 1 << ECX_X2APIC_SHIFT;
            } else {
                entry.cpuid.ecx &= !(1 << ECX_X2APIC_SHIFT);
            }
            if ctx.tsc_deadline_timer {
                entry.cpuid.ecx |= 1 << ECX_TSC_DEADLINE_TIMER_SHIFT;
            }

            if ctx.cpu_config.host_cpu_topology {
                // Host-topology mode: keep the host's EBX topology fields and
                // only force the CLFLUSH line size, then mirror the host HTT
                // bit. Early return — the synthetic topology below must not
                // run.
                entry.cpuid.ebx |= EBX_CLFLUSH_CACHELINE << EBX_CLFLUSH_SIZE_SHIFT;

                // SAFETY-relevant: `ctx.cpuid` is expected to wrap the CPUID
                // instruction (or a test fake); executing it has no memory
                // safety requirements.
                let result = unsafe { (ctx.cpuid)(entry.function) };
                entry.cpuid.edx |= result.edx & (1 << EDX_HTT_SHIFT);
                return;
            }

            // Synthetic topology: initial APIC ID = vcpu_id, CLFLUSH line
            // size = 64 bytes (8 * 8-byte units).
            entry.cpuid.ebx = (ctx.vcpu_id << EBX_CPUID_SHIFT) as u32
                | (EBX_CLFLUSH_CACHELINE << EBX_CLFLUSH_SIZE_SHIFT);
            if ctx.cpu_count > 1 {
                // Multiple vCPUs: report the logical processor count and set
                // HTT so the guest consults the topology leaves.
                entry.cpuid.ebx |= (ctx.cpu_count as u32) << EBX_CPU_COUNT_SHIFT;
                entry.cpuid.edx |= 1 << EDX_HTT_SHIFT;
            }
        }
        // Cache/TLB descriptors (2), brand string (0x80000002-4) and cache
        // info (0x80000005-6): pass the host values straight through.
        2 | 0x80000002 | 0x80000003 | 0x80000004 | 0x80000005 | 0x80000006 => entry.cpuid = {
            unsafe { (ctx.cpuid)(entry.function) }},
        4 => {
            // Leaf 4: deterministic cache parameters — start from the host
            // values for this (function, index) subleaf.
            entry.cpuid = {
                unsafe { (ctx.cpuid_count)(entry.function, entry.index) }};

            if ctx.cpu_config.host_cpu_topology {
                // Host-topology mode keeps the host core counts untouched.
                return;
            }

            // Clear EAX[31:26] (cores per package minus one) before filling
            // in our synthetic value.
            entry.cpuid.eax &= !0xFC000000;
            if ctx.cpu_count > 1 {
                let cpu_cores = if ctx.cpu_config.no_smt {
                    // No SMT: every vCPU is its own core.
                    ctx.cpu_count as u32
                } else if ctx.cpu_count % 2 == 0 {
                    // SMT with an even vCPU count: two threads per core.
                    (ctx.cpu_count >> 1) as u32
                } else {
                    // Odd vCPU count can't be split into thread pairs; report
                    // a single core.
                    1
                };
                entry.cpuid.eax |= (cpu_cores - 1) << EAX_CPU_CORES_SHIFT;
            }
        }
        6 => {
            // Leaf 6: thermal and power management. Only expose HWP-related
            // bits that the host actually supports, and only when enabled.
            let result = {
                unsafe { (ctx.cpuid)(entry.function) }};

            if ctx.cpu_config.enable_hwp {
                entry.cpuid.eax |= result.eax & (1 << EAX_HWP_SHIFT);
                entry.cpuid.eax |= result.eax & (1 << EAX_HWP_NOTIFICATION_SHIFT);
                entry.cpuid.eax |= result.eax & (1 << EAX_HWP_EPP_SHIFT);
                entry.cpuid.ecx |= result.ecx & (1 << ECX_EPB_SHIFT);

                if ctx.cpu_config.itmt {
                    entry.cpuid.eax |= result.eax & (1 << EAX_ITMT_SHIFT);
                }
            }
        }
        7 => {
            // Leaf 7 subleaf 0: structured extended features. Mirror the
            // host's hybrid-CPU bit in host-topology mode, or force it on
            // when a hybrid type is explicitly configured.
            if ctx.cpu_config.host_cpu_topology && entry.index == 0 {
                let result = unsafe { (ctx.cpuid_count)(entry.function, entry.index) };
                entry.cpuid.edx |= result.edx & (1 << EDX_HYBRID_CPU_SHIFT);
            }
            if ctx.cpu_config.hybrid_type.is_some() && entry.index == 0 {
                entry.cpuid.edx |= 1 << EDX_HYBRID_CPU_SHIFT;
            }
        }
        0x15 => {
            // Leaf 0x15: TSC/crystal-clock ratio. Synthesize a calibrated
            // leaf from the measured host TSC frequency when available.
            if let Some(tsc_freq) = ctx.tsc_frequency {
                entry.cpuid = devices::tsc::fake_tsc_frequency_cpuid(tsc_freq, ctx.apic_frequency);
            }
        }
        0x1A => {
            // Leaf 0x1A: hybrid information. In host-topology mode, take the
            // host values wholesale first.
            if ctx.cpu_config.host_cpu_topology {
                entry.cpuid = unsafe { (ctx.cpuid)(entry.function) };
            }
            // An explicitly configured hybrid type overlays the core-type
            // field (note: OR'd on top, assuming the field starts clear —
            // holds when no host value was copied above).
            if let Some(hybrid) = &ctx.cpu_config.hybrid_type {
                match hybrid {
                    CpuHybridType::Atom => {
                        entry.cpuid.eax |= EAX_CORE_TYPE_ATOM << EAX_CORE_TYPE_SHIFT;
                    }
                    CpuHybridType::Core => {
                        entry.cpuid.eax |= EAX_CORE_TYPE_CORE << EAX_CORE_TYPE_SHIFT;
                    }
                }
            }
        }
        0xB | 0x1F => {
            // Extended topology leaves. In host-topology mode the host values
            // are already correct.
            if ctx.cpu_config.host_cpu_topology {
                return;
            }
            // EDX is the x2APIC ID of this logical processor.
            entry.cpuid.edx = ctx.vcpu_id as u32;
            if entry.index == 0 {
                // Subleaf 0 describes the SMT (thread) level: EAX = bits to
                // shift right to get the next-level ID, EBX = logical
                // processors at this level.
                if ctx.cpu_config.no_smt || (ctx.cpu_count == 1) {
                    entry.cpuid.eax = 0;
                    entry.cpuid.ebx = 1;
                } else if ctx.cpu_count % 2 == 0 {
                    // Two threads per core.
                    entry.cpuid.eax = 1;
                    entry.cpuid.ebx = 2;
                } else {
                    // Odd count: all vCPUs reported as threads of one core;
                    // cpu_bits = ceil(log2(cpu_count)).
                    let cpu_bits: u32 = 32 - ((ctx.cpu_count - 1) as u32).leading_zeros();
                    entry.cpuid.eax = cpu_bits;
                    entry.cpuid.ebx = ctx.cpu_count as u32;
                }
                entry.cpuid.ecx = (ECX_TOPO_SMT_TYPE << ECX_TOPO_TYPE_SHIFT) | entry.index;
            } else if entry.index == 1 {
                // Subleaf 1 describes the core level, covering all vCPUs.
                let cpu_bits: u32 = 32 - ((ctx.cpu_count - 1) as u32).leading_zeros();
                entry.cpuid.eax = cpu_bits;
                // EBX[15:0] is the logical processor count at this level.
                entry.cpuid.ebx = (ctx.cpu_count as u32) & 0xffff;
                entry.cpuid.ecx = (ECX_TOPO_CORE_TYPE << ECX_TOPO_TYPE_SHIFT) | entry.index;
            } else {
                // Higher subleaves: no further topology levels.
                entry.cpuid.eax = 0;
                entry.cpuid.ebx = 0;
                entry.cpuid.ecx = 0;
            }
        }
        _ => (),
    }
}
287
288pub fn filter_cpuid(cpuid: &mut hypervisor::CpuId, ctx: &CpuIdContext) {
291 if ctx.tsc_frequency.is_some()
294 && !cpuid
295 .cpu_id_entries
296 .iter()
297 .any(|entry| entry.function == 0x15)
298 {
299 cpuid.cpu_id_entries.push(CpuIdEntry {
300 function: 0x15,
301 index: 0,
302 flags: 0,
303 cpuid: CpuidResult {
304 eax: 0,
305 ebx: 0,
306 ecx: 0,
307 edx: 0,
308 },
309 })
310 }
311
312 let entries = &mut cpuid.cpu_id_entries;
313 for entry in entries.iter_mut() {
314 adjust_cpuid(entry, ctx);
315 }
316}
317
318pub fn setup_cpuid(
329 hypervisor: &dyn HypervisorX86_64,
330 irq_chip: &dyn IrqChipX86_64,
331 vcpu: &dyn VcpuX86_64,
332 vcpu_id: usize,
333 nrcpus: usize,
334 cpu_config: CpuConfigX86_64,
335) -> Result<()> {
336 let mut cpuid = hypervisor
337 .get_supported_cpuid()
338 .map_err(Error::GetSupportedCpusFailed)?;
339
340 filter_cpuid(
341 &mut cpuid,
342 &CpuIdContext::new(
343 vcpu_id,
344 nrcpus,
345 Some(irq_chip),
346 cpu_config,
347 hypervisor.check_capability(HypervisorCap::CalibratedTscLeafRequired),
348 __cpuid_count,
349 __cpuid,
350 ),
351 );
352
353 vcpu.set_cpuid(&cpuid)
354 .map_err(Error::SetSupportedCpusFailed)
355}
356
// CPUID leaf 0 returns the vendor ID string "AuthenticAMD" / "GenuineIntel"
// spread across EBX, EDX, ECX (in that order), four ASCII bytes per register
// in little-endian byte order. The constants below encode each register's
// expected chunk.
const MANUFACTURER_ID_FUNCTION: u32 = 0x00000000;
const AMD_EBX: u32 = u32::from_le_bytes([b'A', b'u', b't', b'h']);
const AMD_EDX: u32 = u32::from_le_bytes([b'e', b'n', b't', b'i']);
const AMD_ECX: u32 = u32::from_le_bytes([b'c', b'A', b'M', b'D']);
const INTEL_EBX: u32 = u32::from_le_bytes([b'G', b'e', b'n', b'u']);
const INTEL_EDX: u32 = u32::from_le_bytes([b'i', b'n', b'e', b'I']);
const INTEL_ECX: u32 = u32::from_le_bytes([b'n', b't', b'e', b'l']);
364
365pub fn cpu_manufacturer() -> CpuManufacturer {
366 let result = unsafe { __cpuid(MANUFACTURER_ID_FUNCTION) };
370 if result.ebx == AMD_EBX && result.edx == AMD_EDX && result.ecx == AMD_ECX {
371 return CpuManufacturer::Amd;
372 } else if result.ebx == INTEL_EBX && result.edx == INTEL_EDX && result.ecx == INTEL_ECX {
373 return CpuManufacturer::Intel;
374 }
375 CpuManufacturer::Unknown
376}
377
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn cpu_manufacturer_test() {
        // Any machine running this test suite is expected to be AMD or Intel.
        let manufacturer = cpu_manufacturer();
        assert_ne!(manufacturer, CpuManufacturer::Unknown);
    }

    #[test]
    fn cpuid_copies_register() {
        // Fake host CPUID implementations with distinctive per-register
        // values so we can verify which source adjust_cpuid copied from.
        let fake_cpuid_count = |_function: u32, _index: u32| CpuidResult {
            eax: 27,
            ebx: 18,
            ecx: 28,
            edx: 18,
        };
        let fake_cpuid = |_function: u32| CpuidResult {
            eax: 0,
            ebx: 0,
            ecx: 0,
            edx: 0,
        };
        let cpu_config = CpuConfigX86_64 {
            force_calibrated_tsc_leaf: false,
            // host_cpu_topology makes leaf 4 an early-return verbatim copy of
            // the host (fake) cpuid_count result.
            host_cpu_topology: true,
            enable_hwp: false,
            no_smt: false,
            itmt: false,
            hybrid_type: None,
        };
        let ctx = CpuIdContext {
            vcpu_id: 0,
            cpu_count: 0,
            x2apic: false,
            tsc_deadline_timer: false,
            apic_frequency: 0,
            tsc_frequency: None,
            cpu_config,
            cpuid_count: fake_cpuid_count,
            cpuid: fake_cpuid,
        };
        let mut cpu_id_entry = CpuIdEntry {
            function: 0x4,
            index: 0,
            flags: 0,
            cpuid: CpuidResult {
                eax: 31,
                ebx: 41,
                ecx: 59,
                edx: 26,
            },
        };
        adjust_cpuid(&mut cpu_id_entry, &ctx);
        // Previously only EAX was asserted; the leaf is copied wholesale, so
        // check all four registers against the fake cpuid_count values.
        assert_eq!(cpu_id_entry.cpuid.eax, 27);
        assert_eq!(cpu_id_entry.cpuid.ebx, 18);
        assert_eq!(cpu_id_entry.cpuid.ecx, 28);
        assert_eq!(cpu_id_entry.cpuid.edx, 18);
    }
}