1use std::arch::x86_64::CpuidResult;
6use std::collections::BTreeMap;
7use std::sync::Arc;
8
9use base::errno_result;
10use base::error;
11use base::ioctl;
12use base::ioctl_with_mut_ptr;
13use base::ioctl_with_mut_ref;
14use base::ioctl_with_ptr;
15use base::ioctl_with_ref;
16use base::ioctl_with_val;
17use base::AsRawDescriptor;
18use base::Error;
19use base::IoctlNr;
20use base::MappedRegion;
21use base::Result;
22use kvm_sys::*;
23use libc::E2BIG;
24use libc::EAGAIN;
25use libc::EINVAL;
26use libc::EIO;
27use libc::ENOMEM;
28use libc::ENXIO;
29use serde::Deserialize;
30use serde::Serialize;
31use snapshot::AnySnapshot;
32use vm_memory::GuestAddress;
33use zerocopy::FromZeros;
34
35use super::Config;
36use super::Kvm;
37use super::KvmCap;
38use super::KvmVcpu;
39use super::KvmVm;
40use crate::host_phys_addr_bits;
41use crate::ClockState;
42use crate::CpuId;
43use crate::CpuIdEntry;
44use crate::DebugRegs;
45use crate::DescriptorTable;
46use crate::DeviceKind;
47use crate::Fpu;
48use crate::FpuReg;
49use crate::HypervisorX86_64;
50use crate::IoapicRedirectionTableEntry;
51use crate::IoapicState;
52use crate::IrqSourceChip;
53use crate::LapicState;
54use crate::PicSelect;
55use crate::PicState;
56use crate::PitChannelState;
57use crate::PitState;
58use crate::ProtectionType;
59use crate::Regs;
60use crate::Segment;
61use crate::Sregs;
62use crate::VcpuExit;
63use crate::VcpuX86_64;
64use crate::VmCap;
65use crate::VmX86_64;
66use crate::Xsave;
67use crate::NUM_IOAPIC_PINS;
68
/// Size in bytes of the legacy (pre-`KVM_CAP_XSAVE2`) fixed XSAVE buffer.
const KVM_XSAVE_MAX_SIZE: usize = 4096;
/// MSR number of `IA32_APICBASE`, used to read/write the local APIC base address.
const MSR_IA32_APICBASE: u32 = 0x0000001b;
71
/// Serializable vcpu interrupt/exception state, mirroring KVM's `kvm_vcpu_events`.
///
/// `Option` fields model values that are only meaningful when the corresponding
/// KVM validity flag is set; `None` means "not reported / do not restore".
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuEvents {
    pub exception: VcpuExceptionState,
    pub interrupt: VcpuInterruptState,
    pub nmi: VcpuNmiState,
    // SIPI vector latched for an AP waiting for startup, when valid.
    pub sipi_vector: Option<u32>,
    pub smi: VcpuSmiState,
    pub triple_fault: VcpuTripleFaultState,
    // Exception payload (e.g. page-fault address); paired with `exception.pending`.
    pub exception_payload: Option<u64>,
}
82
/// Exception portion of [`VcpuEvents`]; mirrors `kvm_vcpu_events.exception`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuExceptionState {
    pub injected: bool,
    // Exception vector number.
    pub nr: u8,
    pub has_error_code: bool,
    // Only valid when the kernel reports the payload-capable interface.
    pub pending: Option<bool>,
    pub error_code: u32,
}
91
/// Interrupt portion of [`VcpuEvents`]; mirrors `kvm_vcpu_events.interrupt`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuInterruptState {
    pub injected: bool,
    // Interrupt vector number.
    pub nr: u8,
    // True for software-injected interrupts.
    pub soft: bool,
    // Interrupt shadow state, when reported by the kernel.
    pub shadow: Option<u8>,
}
99
/// NMI portion of [`VcpuEvents`]; mirrors `kvm_vcpu_events.nmi`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuNmiState {
    pub injected: bool,
    // Only valid when the kernel reports pending-NMI state.
    pub pending: Option<bool>,
    pub masked: bool,
}
106
/// SMI/SMM portion of [`VcpuEvents`]; mirrors `kvm_vcpu_events.smi`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuSmiState {
    // Whether the vcpu is currently in System Management Mode, when reported.
    pub smm: Option<bool>,
    pub pending: bool,
    pub smm_inside_nmi: bool,
    pub latched_init: u8,
}
114
/// Triple-fault portion of [`VcpuEvents`]; `None` when the kernel does not
/// report triple-fault event state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuTripleFaultState {
    pub pending: Option<bool>,
}
119
/// Issues a cpuid-style ioctl (`kind`) against `descriptor`, growing the entry
/// buffer until the kernel's response fits.
///
/// Starts with room for `initial_capacity` entries. If the kernel reports
/// `E2BIG` (buffer too small), the capacity is doubled and the ioctl retried;
/// any other errno — or overflow while doubling — is returned to the caller.
pub fn get_cpuid_with_initial_capacity<T: AsRawDescriptor>(
    descriptor: &T,
    kind: IoctlNr,
    initial_capacity: usize,
) -> Result<CpuId> {
    let mut entries: usize = initial_capacity;

    loop {
        let mut kvm_cpuid =
            kvm_cpuid2::<[kvm_cpuid_entry2]>::new_box_zeroed_with_elems(entries).unwrap();
        kvm_cpuid.nent = entries.try_into().unwrap();

        let ret = {
            // SAFETY: kvm_cpuid was allocated with `entries` trailing elements and `nent`
            // matches, so the kernel will not write past the buffer.
            unsafe { ioctl_with_mut_ref(descriptor, kind, &mut *kvm_cpuid) }
        };
        if ret < 0 {
            let err = Error::last();
            match err.errno() {
                E2BIG => {
                    // Buffer too small; double the capacity and retry.
                    if let Some(val) = entries.checked_mul(2) {
                        entries = val;
                    } else {
                        return Err(err);
                    }
                }
                _ => return Err(err),
            }
        } else {
            return Ok(CpuId::from(&*kvm_cpuid));
        }
    }
}
157
impl Kvm {
    /// Queries a cpuid-style ioctl, starting with space for 256 entries and
    /// growing as needed.
    pub fn get_cpuid(&self, kind: IoctlNr) -> Result<CpuId> {
        const KVM_MAX_ENTRIES: usize = 256;
        get_cpuid_with_initial_capacity(self, kind, KVM_MAX_ENTRIES)
    }

    /// Maps a `ProtectionType` to the VM type value passed to `KVM_CREATE_VM`:
    /// a pKVM protected VM when guest memory must be isolated from the host,
    /// otherwise the default VM type.
    pub fn get_vm_type(&self, protection_type: ProtectionType) -> Result<u32> {
        if protection_type.isolates_memory() {
            Ok(KVM_X86_PKVM_PROTECTED_VM)
        } else {
            Ok(KVM_X86_DEFAULT_VM)
        }
    }

    /// Guest physical address width in bits; on KVM x86 this matches the host's.
    pub fn get_guest_phys_addr_bits(&self) -> u8 {
        host_phys_addr_bits()
    }
}
178
impl HypervisorX86_64 for Kvm {
    fn get_supported_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_SUPPORTED_CPUID)
    }

    /// Returns the list of MSR indices supported by `KVM_GET_MSRS`/`KVM_SET_MSRS`,
    /// truncated to at most 256 entries.
    fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        const MAX_KVM_MSR_ENTRIES: usize = 256;

        let mut msr_list = kvm_msr_list::<[u32; MAX_KVM_MSR_ENTRIES]>::new_zeroed();
        msr_list.nmsrs = MAX_KVM_MSR_ENTRIES as u32;

        let ret = {
            // SAFETY: the buffer holds MAX_KVM_MSR_ENTRIES indices and `nmsrs` advertises
            // exactly that capacity, so the kernel will not write past the array.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST, &mut msr_list) }
        };
        if ret < 0 {
            return errno_result();
        }

        let mut nmsrs = msr_list.nmsrs;
        // Defensive clamp: never read more entries than the array actually holds.
        if nmsrs > MAX_KVM_MSR_ENTRIES as u32 {
            nmsrs = MAX_KVM_MSR_ENTRIES as u32;
        }

        Ok(msr_list.indices[..nmsrs as usize].to_vec())
    }
}
209
impl KvmVm {
    /// Performs x86-specific VM initialization; currently a no-op on KVM.
    pub fn init_arch(&self, _cfg: &Config) -> Result<()> {
        Ok(())
    }

    /// Arch-specific VM capability check. KVM always provides a paravirt clock;
    /// anything else returns `None` to defer to the generic capability path.
    pub fn check_capability_arch(&self, c: VmCap) -> Option<bool> {
        match c {
            VmCap::PvClock => Some(true),
            _ => None,
        }
    }

    /// No device kind needs arch-specific `kvm_create_device` parameters on x86.
    pub fn get_device_params_arch(&self, _kind: DeviceKind) -> Option<kvm_create_device> {
        None
    }

    /// Reads the guest's paravirt clock via `KVM_GET_CLOCK`.
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        let mut clock_data: kvm_clock_data = Default::default();
        // SAFETY: KVM_GET_CLOCK writes only within the provided kvm_clock_data struct.
        let ret =
            unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK, &mut clock_data) };
        if ret == 0 {
            Ok(ClockState::from(&clock_data))
        } else {
            errno_result()
        }
    }

    /// Sets the guest's paravirt clock via `KVM_SET_CLOCK`.
    pub fn set_pvclock_arch(&self, state: &ClockState) -> Result<()> {
        let clock_data = kvm_clock_data::from(state);
        // SAFETY: KVM_SET_CLOCK only reads the struct for the duration of the call.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK, &clock_data) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the state of the in-kernel PIC selected by `id`.
    ///
    /// Presumably requires the VM to have an in-kernel irqchip — TODO confirm at callers.
    pub fn get_pic_state(&self, id: PicSelect) -> Result<kvm_pic_state> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        let ret = {
            // SAFETY: KVM_GET_IRQCHIP writes only within the provided struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY: `chip_id` selected a PIC, so the kernel filled the
                // union's `pic` variant.
                unsafe { irqchip_state.chip.pic },
            )
        } else {
            errno_result()
        }
    }

    /// Writes the state of the in-kernel PIC selected by `id`.
    pub fn set_pic_state(&self, id: PicSelect, state: &kvm_pic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        irqchip_state.chip.pic = *state;
        // SAFETY: KVM_SET_IRQCHIP only reads the struct for the duration of the call.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Number of IOAPIC pins; fixed on x86.
    pub fn get_ioapic_num_pins(&self) -> Result<usize> {
        Ok(NUM_IOAPIC_PINS)
    }

    /// Reads the state of the in-kernel IOAPIC.
    pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> {
        let mut irqchip_state = kvm_irqchip {
            // chip_id 2 selects the IOAPIC (KVM_IRQCHIP_IOAPIC).
            chip_id: 2,
            ..Default::default()
        };
        let ret = {
            // SAFETY: KVM_GET_IRQCHIP writes only within the provided struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY: `chip_id` selected the IOAPIC, so the kernel filled the
                // union's `ioapic` variant.
                unsafe { irqchip_state.chip.ioapic },
            )
        } else {
            errno_result()
        }
    }

    /// Writes the state of the in-kernel IOAPIC.
    pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            // chip_id 2 selects the IOAPIC (KVM_IRQCHIP_IOAPIC).
            chip_id: 2,
            ..Default::default()
        };
        irqchip_state.chip.ioapic = *state;
        // SAFETY: KVM_SET_IRQCHIP only reads the struct for the duration of the call.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates the in-kernel PIT with default configuration (`KVM_CREATE_PIT2`).
    pub fn create_pit(&self) -> Result<()> {
        let pit_config = kvm_pit_config::default();
        // SAFETY: KVM_CREATE_PIT2 only reads the config for the duration of the call.
        let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2, &pit_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the in-kernel PIT state (`KVM_GET_PIT2`).
    pub fn get_pit_state(&self) -> Result<kvm_pit_state2> {
        let mut pit_state = Default::default();
        // SAFETY: KVM_GET_PIT2 writes only within the provided kvm_pit_state2 struct.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2, &mut pit_state) };
        if ret == 0 {
            Ok(pit_state)
        } else {
            errno_result()
        }
    }

    /// Writes the in-kernel PIT state (`KVM_SET_PIT2`).
    pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> {
        // SAFETY: KVM_SET_PIT2 only reads the struct for the duration of the call.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2, pit_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Enables or disables guest reads of MSR_PLATFORM_INFO
    /// (KVM_CAP_MSR_PLATFORM_INFO).
    pub fn set_platform_info_read_access(&self, allow_read: bool) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_MSR_PLATFORM_INFO,
            ..Default::default()
        };
        cap.args[0] = allow_read as u64;

        // SAFETY: KVM_ENABLE_CAP only reads the struct for the duration of the call.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Enables the split irqchip (userspace PIC/IOAPIC, in-kernel LAPIC) with
    /// `ioapic_pins` IOAPIC routes reserved.
    pub fn enable_split_irqchip(&self, ioapic_pins: usize) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_SPLIT_IRQCHIP,
            ..Default::default()
        };
        cap.args[0] = ioapic_pins as u64;
        // SAFETY: KVM_ENABLE_CAP only reads the struct for the duration of the call.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Queries the protected-VM firmware info (e.g. its size) from the kernel
    /// via the X86ProtectedVm capability.
    fn get_protected_vm_info(&self) -> Result<KvmProtectedVmInfo> {
        let mut info = KvmProtectedVmInfo {
            firmware_size: 0,
            reserved: [0; 7],
        };
        // SAFETY: `info` outlives the call and the pointer passed in args[0] refers to
        // it, so the kernel writes only within the struct.
        unsafe {
            self.enable_raw_capability(
                KvmCap::X86ProtectedVm,
                KVM_CAP_X86_PROTECTED_VM_FLAGS_INFO,
                &[&mut info as *mut KvmProtectedVmInfo as u64, 0, 0, 0],
            )
        }?;
        Ok(info)
    }

    /// Tells the kernel the guest-physical address where the protected-VM
    /// firmware should be placed.
    fn set_protected_vm_firmware_gpa(&self, fw_addr: GuestAddress) -> Result<()> {
        // SAFETY: all arguments are plain values; no caller memory is accessed.
        unsafe {
            self.enable_raw_capability(
                KvmCap::X86ProtectedVm,
                KVM_CAP_X86_PROTECTED_VM_FLAGS_SET_FW_GPA,
                &[fw_addr.0, 0, 0, 0],
            )
        }
    }
}
478
/// Kernel ABI struct filled in by the X86ProtectedVm "info" capability call;
/// layout must match the kernel's definition, hence `#[repr(C)]`.
#[repr(C)]
struct KvmProtectedVmInfo {
    // Size in bytes of the protected-VM firmware; 0 means no firmware available.
    firmware_size: u64,
    reserved: [u64; 7],
}
484
impl VmX86_64 for KvmVm {
    fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
        &self.kvm
    }

    /// Asks the kernel to load the protected-VM firmware at `fw_addr`.
    ///
    /// Fails with `EINVAL` when the kernel reports no firmware (size 0) and
    /// with `ENOMEM` when the firmware would not fit in `fw_max_size` bytes.
    fn load_protected_vm_firmware(&self, fw_addr: GuestAddress, fw_max_size: u64) -> Result<()> {
        let info = self.get_protected_vm_info()?;
        if info.firmware_size == 0 {
            Err(Error::new(EINVAL))
        } else {
            if info.firmware_size > fw_max_size {
                return Err(Error::new(ENOMEM));
            }
            self.set_protected_vm_firmware_gpa(fw_addr)
        }
    }

    fn create_vcpu(&self, id: usize) -> Result<Arc<dyn VcpuX86_64>> {
        Ok(Arc::new(KvmVm::create_kvm_vcpu(self, id)?))
    }

    /// Sets the guest-physical address of the TSS region (`KVM_SET_TSS_ADDR`).
    fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> {
        // SAFETY: the address is passed by value; no caller memory is accessed.
        let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR, addr.offset()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the guest-physical address of the identity map page
    /// (`KVM_SET_IDENTITY_MAP_ADDR`).
    fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> {
        // SAFETY: the kernel only reads the u64 address for the duration of the call.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR, &addr.offset()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}
536
impl KvmVcpu {
    /// Arch-specific handling of a reset system event: surfaced directly as a
    /// reset exit to the caller.
    pub fn system_event_reset(&self, _event_flags: u64) -> Result<VcpuExit> {
        Ok(VcpuExit::SystemEventReset)
    }

    /// Returns the size in bytes of this VM's XSAVE area.
    ///
    /// `KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)` reports the required buffer size
    /// (or a small value when the extension is absent); the result is clamped
    /// up to the 4096-byte legacy minimum.
    fn xsave_size(&self) -> Result<usize> {
        let size = {
            // SAFETY: KVM_CHECK_EXTENSION takes a plain value argument and touches no
            // caller memory.
            unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2 as u64) }
        };
        if size < 0 {
            return errno_result();
        }
        // Only return the size from CAP_XSAVE2 if it's larger than the fixed size.
        let size: usize = size.try_into().unwrap();
        Ok(size.max(KVM_XSAVE_MAX_SIZE))
    }

    /// Translates x86-specific KVM exit reasons into `VcpuExit`s; returns `None`
    /// for reasons handled by the generic (non-arch) exit path.
    #[inline]
    pub(crate) fn handle_vm_exit_arch(&self, run: &mut kvm_run) -> Option<VcpuExit> {
        match run.exit_reason {
            KVM_EXIT_IO => Some(VcpuExit::Io),
            KVM_EXIT_IOAPIC_EOI => {
                // SAFETY: the exit_reason tells us which union variant the kernel wrote.
                let vector = unsafe { run.__bindgen_anon_1.eoi.vector };
                Some(VcpuExit::IoapicEoi { vector })
            }
            KVM_EXIT_HLT => Some(VcpuExit::Hlt),
            KVM_EXIT_SET_TPR => Some(VcpuExit::SetTpr),
            KVM_EXIT_TPR_ACCESS => Some(VcpuExit::TprAccess),
            KVM_EXIT_X86_BUS_LOCK => Some(VcpuExit::BusLock),
            _ => None,
        }
    }
}
583
/// KVM-specific vcpu state captured in snapshots: pending interrupt/exception
/// events plus the opaque nested-virtualization state blob (empty when the
/// host lacks `KVM_CAP_NESTED_STATE`).
#[derive(Debug, Serialize, Deserialize)]
struct HypervisorState {
    interrupts: VcpuEvents,
    nested_state: Vec<u8>,
}
589
590impl VcpuX86_64 for KvmVcpu {
591 #[allow(clippy::cast_ptr_alignment)]
592 fn set_interrupt_window_requested(&self, requested: bool) {
593 let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
598 run.request_interrupt_window = requested.into();
599 }
600
601 #[allow(clippy::cast_ptr_alignment)]
602 fn ready_for_interrupt(&self) -> bool {
603 let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
608 run.ready_for_interrupt_injection != 0 && run.if_flag != 0
609 }
610
611 fn interrupt(&self, irq: u8) -> Result<()> {
616 if !self.ready_for_interrupt() {
617 return Err(Error::new(EAGAIN));
618 }
619
620 let interrupt = kvm_interrupt { irq: irq.into() };
621 let ret = unsafe { ioctl_with_ref(self, KVM_INTERRUPT, &interrupt) };
625 if ret == 0 {
626 Ok(())
627 } else {
628 errno_result()
629 }
630 }
631
632 fn inject_nmi(&self) -> Result<()> {
633 let ret = unsafe { ioctl(self, KVM_NMI) };
636 if ret == 0 {
637 Ok(())
638 } else {
639 errno_result()
640 }
641 }
642
643 fn get_regs(&self) -> Result<Regs> {
644 let mut regs: kvm_regs = Default::default();
645 let ret = {
646 unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS, &mut regs) }
651 };
652 if ret == 0 {
653 Ok(Regs::from(®s))
654 } else {
655 errno_result()
656 }
657 }
658
659 fn set_regs(&self, regs: &Regs) -> Result<()> {
660 let regs = kvm_regs::from(regs);
661 let ret = {
662 unsafe { ioctl_with_ref(self, KVM_SET_REGS, ®s) }
667 };
668 if ret == 0 {
669 Ok(())
670 } else {
671 errno_result()
672 }
673 }
674
675 fn get_sregs(&self) -> Result<Sregs> {
676 let mut regs: kvm_sregs = Default::default();
677 let ret = {
678 unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) }
683 };
684 if ret == 0 {
685 Ok(Sregs::from(®s))
686 } else {
687 errno_result()
688 }
689 }
690
691 fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
692 let mut kvm_sregs: kvm_sregs = Default::default();
695 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut kvm_sregs) };
699 if ret != 0 {
700 return errno_result();
701 }
702
703 kvm_sregs.cs = kvm_segment::from(&sregs.cs);
704 kvm_sregs.ds = kvm_segment::from(&sregs.ds);
705 kvm_sregs.es = kvm_segment::from(&sregs.es);
706 kvm_sregs.fs = kvm_segment::from(&sregs.fs);
707 kvm_sregs.gs = kvm_segment::from(&sregs.gs);
708 kvm_sregs.ss = kvm_segment::from(&sregs.ss);
709 kvm_sregs.tr = kvm_segment::from(&sregs.tr);
710 kvm_sregs.ldt = kvm_segment::from(&sregs.ldt);
711 kvm_sregs.gdt = kvm_dtable::from(&sregs.gdt);
712 kvm_sregs.idt = kvm_dtable::from(&sregs.idt);
713 kvm_sregs.cr0 = sregs.cr0;
714 kvm_sregs.cr2 = sregs.cr2;
715 kvm_sregs.cr3 = sregs.cr3;
716 kvm_sregs.cr4 = sregs.cr4;
717 kvm_sregs.cr8 = sregs.cr8;
718 kvm_sregs.efer = sregs.efer;
719
720 let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &kvm_sregs) };
724 if ret == 0 {
725 Ok(())
726 } else {
727 errno_result()
728 }
729 }
730
731 fn get_fpu(&self) -> Result<Fpu> {
732 let mut fpu: kvm_fpu = Default::default();
733 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU, &mut fpu) };
737 if ret == 0 {
738 Ok(Fpu::from(&fpu))
739 } else {
740 errno_result()
741 }
742 }
743
744 fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
745 let fpu = kvm_fpu::from(fpu);
746 let ret = {
747 unsafe { ioctl_with_ref(self, KVM_SET_FPU, &fpu) }
750 };
751 if ret == 0 {
752 Ok(())
753 } else {
754 errno_result()
755 }
756 }
757
758 fn get_xsave(&self) -> Result<Xsave> {
760 let size = self.xsave_size()?;
761 let ioctl_nr = if size > KVM_XSAVE_MAX_SIZE {
762 KVM_GET_XSAVE2
763 } else {
764 KVM_GET_XSAVE
765 };
766 let mut xsave = Xsave::new(size);
767
768 let ret = unsafe { ioctl_with_mut_ptr(self, ioctl_nr, xsave.as_mut_ptr()) };
772 if ret == 0 {
773 Ok(xsave)
774 } else {
775 errno_result()
776 }
777 }
778
779 fn set_xsave(&self, xsave: &Xsave) -> Result<()> {
780 let size = self.xsave_size()?;
781 if xsave.len() != size {
784 return Err(Error::new(EIO));
785 }
786
787 let ret = unsafe { ioctl_with_ptr(self, KVM_SET_XSAVE, xsave.as_ptr()) };
793 if ret == 0 {
794 Ok(())
795 } else {
796 errno_result()
797 }
798 }
799
800 fn get_hypervisor_specific_state(&self) -> Result<AnySnapshot> {
801 let mut vcpu_evts: kvm_vcpu_events = Default::default();
802 let ret = { unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS, &mut vcpu_evts) } };
807 if ret != 0 {
808 return errno_result();
809 }
810 let interrupts = VcpuEvents::from(&vcpu_evts);
811 let ret =
812 unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE as u64) };
815 if ret < 0 {
816 return errno_result();
817 }
818 let nested_state = if ret == 0 {
820 Vec::new()
821 } else {
822 let mut nested_state: Vec<u8> = vec![0; ret as usize];
823 let nested_state_ptr = nested_state.as_ptr() as *mut kvm_nested_state;
824 assert!(nested_state_ptr.is_aligned());
825 unsafe {
833 (*nested_state_ptr).size = ret as u32;
834 }
835 assert!(nested_state.as_ptr().is_aligned());
836 let ret = unsafe {
840 ioctl_with_mut_ptr(self, KVM_GET_NESTED_STATE, nested_state.as_mut_ptr())
841 };
842 if ret < 0 {
843 return errno_result();
844 }
845 nested_state
846 };
847 AnySnapshot::to_any(HypervisorState {
848 interrupts,
849 nested_state,
850 })
851 .map_err(|e| {
852 error!("failed to serialize hypervisor state: {:?}", e);
853 Error::new(EIO)
854 })
855 }
856
857 fn set_hypervisor_specific_state(&self, data: AnySnapshot) -> Result<()> {
858 let hypervisor_state = AnySnapshot::from_any::<HypervisorState>(data).map_err(|e| {
859 error!("failed to deserialize hypervisor_state: {:?}", e);
860 Error::new(EIO)
861 })?;
862 let vcpu_events = kvm_vcpu_events::from(&hypervisor_state.interrupts);
863 let ret = {
864 unsafe { ioctl_with_ref(self, KVM_SET_VCPU_EVENTS, &vcpu_events) }
869 };
870 if ret != 0 {
871 return errno_result();
872 }
873 if hypervisor_state.nested_state.is_empty() {
874 return Ok(());
875 }
876 unsafe {
885 let vec_len = hypervisor_state.nested_state.len();
886 assert!(
887 (hypervisor_state.nested_state.as_ptr() as *const kvm_nested_state).is_aligned()
888 );
889 if (*(hypervisor_state.nested_state.as_ptr() as *const kvm_nested_state)).size
890 > vec_len as u32
891 {
892 error!("Invalued nested state data, size larger than vec allocated.");
893 return Err(Error::new(EINVAL));
894 }
895 }
896 let ret = unsafe {
901 ioctl_with_ptr(
902 self,
903 KVM_SET_NESTED_STATE,
904 hypervisor_state.nested_state.as_ptr(),
905 )
906 };
907 if ret == 0 {
908 Ok(())
909 } else {
910 errno_result()
911 }
912 }
913
914 fn get_debugregs(&self) -> Result<DebugRegs> {
915 let mut regs: kvm_debugregs = Default::default();
916 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS, &mut regs) };
920 if ret == 0 {
921 Ok(DebugRegs::from(®s))
922 } else {
923 errno_result()
924 }
925 }
926
927 fn set_debugregs(&self, dregs: &DebugRegs) -> Result<()> {
928 let dregs = kvm_debugregs::from(dregs);
929 let ret = {
930 unsafe { ioctl_with_ref(self, KVM_SET_DEBUGREGS, &dregs) }
933 };
934 if ret == 0 {
935 Ok(())
936 } else {
937 errno_result()
938 }
939 }
940
941 fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
942 let mut regs: kvm_xcrs = Default::default();
943 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS, &mut regs) };
947 if ret < 0 {
948 return errno_result();
949 }
950
951 Ok(regs
952 .xcrs
953 .iter()
954 .take(regs.nr_xcrs as usize)
955 .map(|kvm_xcr| (kvm_xcr.xcr, kvm_xcr.value))
956 .collect())
957 }
958
959 fn set_xcr(&self, xcr_index: u32, value: u64) -> Result<()> {
960 let mut kvm_xcr = kvm_xcrs {
961 nr_xcrs: 1,
962 ..Default::default()
963 };
964 kvm_xcr.xcrs[0].xcr = xcr_index;
965 kvm_xcr.xcrs[0].value = value;
966
967 let ret = {
968 unsafe { ioctl_with_ref(self, KVM_SET_XCRS, &kvm_xcr) }
971 };
972 if ret == 0 {
973 Ok(())
974 } else {
975 errno_result()
976 }
977 }
978
979 fn get_msr(&self, msr_index: u32) -> Result<u64> {
980 let mut msrs = kvm_msrs::<[kvm_msr_entry; 1]>::new_zeroed();
981 msrs.nmsrs = 1;
982 msrs.entries[0].index = msr_index;
983
984 let ret = {
985 unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut msrs) }
988 };
989 if ret < 0 {
990 return errno_result();
991 }
992
993 if ret != 1 {
995 return Err(base::Error::new(libc::ENOENT));
996 }
997
998 Ok(msrs.entries[0].data)
999 }
1000
1001 fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
1002 let msr_index_list = self.kvm.get_msr_index_list()?;
1003
1004 let mut kvm_msrs =
1005 kvm_msrs::<[kvm_msr_entry]>::new_box_zeroed_with_elems(msr_index_list.len()).unwrap();
1006 kvm_msrs.nmsrs = msr_index_list.len() as u32;
1007 kvm_msrs
1008 .entries
1009 .iter_mut()
1010 .zip(msr_index_list.iter())
1011 .for_each(|(msr_entry, msr_index)| msr_entry.index = *msr_index);
1012
1013 let ret = {
1014 unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut *kvm_msrs) }
1017 };
1018 if ret < 0 {
1019 return errno_result();
1020 }
1021
1022 let count = ret as usize;
1024 if count != msr_index_list.len() {
1025 error!(
1026 "failed to get all MSRs: requested {}, got {}",
1027 msr_index_list.len(),
1028 count,
1029 );
1030 return Err(base::Error::new(libc::EPERM));
1031 }
1032
1033 let msrs = BTreeMap::from_iter(
1034 kvm_msrs
1035 .entries
1036 .iter()
1037 .map(|kvm_msr| (kvm_msr.index, kvm_msr.data)),
1038 );
1039
1040 Ok(msrs)
1041 }
1042
1043 fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
1044 let mut kvm_msrs = kvm_msrs::<[kvm_msr_entry; 1]>::new_zeroed();
1045 kvm_msrs.nmsrs = 1;
1046 kvm_msrs.entries[0].index = msr_index;
1047 kvm_msrs.entries[0].data = value;
1048
1049 let ret = {
1050 unsafe { ioctl_with_ref(self, KVM_SET_MSRS, &kvm_msrs) }
1053 };
1054 if ret < 0 {
1055 return errno_result();
1056 }
1057
1058 if ret != 1 {
1060 error!("failed to set MSR {:#x} to {:#x}", msr_index, value);
1061 return Err(base::Error::new(libc::EPERM));
1062 }
1063
1064 Ok(())
1065 }
1066
1067 fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
1068 let cpuid = Box::<kvm_cpuid2<[kvm_cpuid_entry2]>>::from(cpuid);
1069 let ret = {
1070 unsafe { ioctl_with_ref(self, KVM_SET_CPUID2, &*cpuid) }
1073 };
1074 if ret == 0 {
1075 Ok(())
1076 } else {
1077 errno_result()
1078 }
1079 }
1080
1081 fn set_guest_debug(&self, addrs: &[GuestAddress], enable_singlestep: bool) -> Result<()> {
1082 use kvm_sys::*;
1083 let mut dbg: kvm_guest_debug = Default::default();
1084
1085 if addrs.len() > 4 {
1086 error!(
1087 "Support 4 breakpoints at most but {} addresses are passed",
1088 addrs.len()
1089 );
1090 return Err(base::Error::new(libc::EINVAL));
1091 }
1092
1093 dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
1094 if enable_singlestep {
1095 dbg.control |= KVM_GUESTDBG_SINGLESTEP;
1096 }
1097
1098 dbg.arch.debugreg[7] = 0x0600;
1102
1103 for (i, addr) in addrs.iter().enumerate() {
1104 dbg.arch.debugreg[i] = addr.0;
1105 dbg.arch.debugreg[7] |= 2 << (i * 2);
1107 }
1108
1109 let ret = {
1110 unsafe { ioctl_with_ref(self, KVM_SET_GUEST_DEBUG, &dbg) }
1113 };
1114 if ret == 0 {
1115 Ok(())
1116 } else {
1117 errno_result()
1118 }
1119 }
1120
1121 fn handle_cpuid(&self, _entry: &CpuIdEntry) -> Result<()> {
1123 Err(Error::new(ENXIO))
1124 }
1125
1126 fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, _tsc_offset: u64) -> Result<()> {
1127 Ok(())
1129 }
1130}
1131
impl KvmVcpu {
    /// Reads the in-kernel local APIC state (`KVM_GET_LAPIC`).
    pub fn get_lapic(&self) -> Result<kvm_lapic_state> {
        let mut klapic: kvm_lapic_state = Default::default();

        let ret = {
            // SAFETY: KVM_GET_LAPIC writes only within the provided struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_LAPIC, &mut klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(klapic)
    }

    /// Writes the in-kernel local APIC state (`KVM_SET_LAPIC`).
    pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> {
        let ret = {
            // SAFETY: KVM_SET_LAPIC only reads the struct for the duration of the call.
            unsafe { ioctl_with_ref(self, KVM_SET_LAPIC, klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Reads the APIC base address (and mode bits) from IA32_APICBASE.
    pub fn get_apic_base(&self) -> Result<u64> {
        self.get_msr(MSR_IA32_APICBASE)
    }

    /// Writes the APIC base address (and mode bits) to IA32_APICBASE.
    pub fn set_apic_base(&self, apic_base: u64) -> Result<()> {
        self.set_msr(MSR_IA32_APICBASE, apic_base)
    }

    /// Reads the pending-interrupt bitmap from the vcpu's special registers.
    pub fn get_interrupt_bitmap(&self) -> Result<[u64; 4usize]> {
        let mut regs: kvm_sregs = Default::default();
        // SAFETY: KVM_GET_SREGS writes only within the provided kvm_sregs struct.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            Ok(regs.interrupt_bitmap)
        } else {
            errno_result()
        }
    }

    /// Writes the pending-interrupt bitmap via a read-modify-write of the
    /// special registers so all other sregs fields are preserved.
    pub fn set_interrupt_bitmap(&self, interrupt_bitmap: [u64; 4usize]) -> Result<()> {
        // Potentially racy: another thread could change sregs between the get and the
        // set below — presumably callers serialize vcpu state access; TODO confirm.
        let mut regs: kvm_sregs = Default::default();
        // SAFETY: KVM_GET_SREGS writes only within the provided kvm_sregs struct.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            regs.interrupt_bitmap = interrupt_bitmap;
            // SAFETY: KVM_SET_SREGS only reads the struct for the call's duration.
            let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &regs) };
            if ret >= 0 {
                Ok(())
            } else {
                errno_result()
            }
        } else {
            errno_result()
        }
    }
}
1227
1228impl<'a> From<&'a kvm_cpuid2<[kvm_cpuid_entry2]>> for CpuId {
1229 fn from(kvm_cpuid: &'a kvm_cpuid2<[kvm_cpuid_entry2]>) -> CpuId {
1230 let kvm_entries = &kvm_cpuid.entries[..kvm_cpuid.nent as usize];
1231 let mut cpu_id_entries = Vec::with_capacity(kvm_entries.len());
1232
1233 for entry in kvm_entries {
1234 let cpu_id_entry = CpuIdEntry {
1235 function: entry.function,
1236 index: entry.index,
1237 flags: entry.flags,
1238 cpuid: CpuidResult {
1239 eax: entry.eax,
1240 ebx: entry.ebx,
1241 ecx: entry.ecx,
1242 edx: entry.edx,
1243 },
1244 };
1245 cpu_id_entries.push(cpu_id_entry)
1246 }
1247 CpuId { cpu_id_entries }
1248 }
1249}
1250
impl From<&CpuId> for Box<kvm_cpuid2<[kvm_cpuid_entry2]>> {
    /// Builds a kernel-ABI cpuid table sized exactly to the entries in `cpuid`.
    fn from(cpuid: &CpuId) -> Box<kvm_cpuid2<[kvm_cpuid_entry2]>> {
        let mut kvm =
            kvm_cpuid2::<[kvm_cpuid_entry2]>::new_box_zeroed_with_elems(cpuid.cpu_id_entries.len())
                .unwrap();
        kvm.nent = cpuid.cpu_id_entries.len().try_into().unwrap();
        for (i, &e) in cpuid.cpu_id_entries.iter().enumerate() {
            kvm.entries[i] = kvm_cpuid_entry2 {
                function: e.function,
                index: e.index,
                flags: e.flags,
                eax: e.cpuid.eax,
                ebx: e.cpuid.ebx,
                ecx: e.cpuid.ecx,
                edx: e.cpuid.edx,
                // Padding/reserved fields stay zeroed.
                ..Default::default()
            };
        }
        kvm
    }
}
1272
impl From<&ClockState> for kvm_clock_data {
    /// Only the clock value is carried over; flags and padding stay zeroed.
    fn from(state: &ClockState) -> Self {
        kvm_clock_data {
            clock: state.clock,
            ..Default::default()
        }
    }
}
1281
impl From<&kvm_clock_data> for ClockState {
    /// Only the clock value is preserved; KVM's flags field is dropped.
    fn from(clock_data: &kvm_clock_data) -> Self {
        ClockState {
            clock: clock_data.clock,
        }
    }
}
1289
impl From<&kvm_pic_state> for PicState {
    /// Field-for-field copy; KVM's u8 "boolean" fields become real `bool`s.
    fn from(item: &kvm_pic_state) -> Self {
        PicState {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select != 0,
            poll: item.poll != 0,
            special_mask: item.special_mask != 0,
            init_state: item.init_state.into(),
            auto_eoi: item.auto_eoi != 0,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi != 0,
            special_fully_nested_mode: item.special_fully_nested_mode != 0,
            // KVM calls this field `init4` (4-byte initialization command words).
            use_4_byte_icw: item.init4 != 0,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}
1312
impl From<&PicState> for kvm_pic_state {
    /// Inverse of the conversion above: `bool`s become KVM's u8 flags.
    fn from(item: &PicState) -> Self {
        kvm_pic_state {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select as u8,
            poll: item.poll as u8,
            special_mask: item.special_mask as u8,
            init_state: item.init_state as u8,
            auto_eoi: item.auto_eoi as u8,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi as u8,
            special_fully_nested_mode: item.special_fully_nested_mode as u8,
            init4: item.use_4_byte_icw as u8,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}
1335
impl From<&kvm_ioapic_state> for IoapicState {
    fn from(item: &kvm_ioapic_state) -> Self {
        let mut state = IoapicState {
            base_address: item.base_address,
            ioregsel: item.ioregsel as u8,
            ioapicid: item.id,
            current_interrupt_level_bitmap: item.irr,
            redirect_table: [IoapicRedirectionTableEntry::default(); NUM_IOAPIC_PINS],
        };
        // Copy min(kernel pins, NUM_IOAPIC_PINS) redirection entries; zip stops at the
        // shorter side.
        for (in_state, out_state) in item.redirtbl.iter().zip(state.redirect_table.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}
1351
impl From<&IoapicRedirectionTableEntry> for kvm_ioapic_state__bindgen_ty_1 {
    fn from(item: &IoapicRedirectionTableEntry) -> Self {
        kvm_ioapic_state__bindgen_ty_1 {
            // The union's `bits` variant is the raw 64-bit redirection entry.
            bits: item.get(0, 64),
        }
    }
}
1361
impl From<&kvm_ioapic_state__bindgen_ty_1> for IoapicRedirectionTableEntry {
    fn from(item: &kvm_ioapic_state__bindgen_ty_1) -> Self {
        let mut entry = IoapicRedirectionTableEntry::default();
        // SAFETY: all variants of this union alias the same 64 bits, so reading `bits`
        // is always valid.
        entry.set(0, 64, unsafe { item.bits });
        entry
    }
}
1372
impl From<&IoapicState> for kvm_ioapic_state {
    fn from(item: &IoapicState) -> Self {
        let mut state = kvm_ioapic_state {
            base_address: item.base_address,
            ioregsel: item.ioregsel as u32,
            id: item.ioapicid,
            irr: item.current_interrupt_level_bitmap,
            ..Default::default()
        };
        // Copy min(NUM_IOAPIC_PINS, kernel pins) redirection entries; zip stops at the
        // shorter side.
        for (in_state, out_state) in item.redirect_table.iter().zip(state.redirtbl.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}
1388
impl From<&LapicState> for kvm_lapic_state {
    /// Packs 32-bit APIC registers into KVM's byte array, where each register
    /// occupies a 16-byte-aligned slot with only the low 4 bytes meaningful.
    fn from(item: &LapicState) -> Self {
        let mut state = kvm_lapic_state::default();
        for (reg, value) in item.regs.iter().enumerate() {
            // Registers are 32 bits wide but spaced 16 bytes apart.
            let reg_offset = 16 * reg;
            let regs_slice = &mut state.regs[reg_offset..reg_offset + 4];

            // kvm_lapic_state.regs is declared as i8, so cast each little-endian byte.
            for (i, v) in value.to_le_bytes().iter().enumerate() {
                regs_slice[i] = *v as i8;
            }
        }
        state
    }
}
1407
1408impl From<&kvm_lapic_state> for LapicState {
1409 fn from(item: &kvm_lapic_state) -> Self {
1410 let mut state = LapicState { regs: [0; 64] };
1411 for reg in 0..64 {
1413 let reg_offset = 16 * reg;
1415
1416 let reg_slice = &item.regs[reg_offset..reg_offset + 4];
1418 let mut bytes = [0u8; 4];
1419 for i in 0..4 {
1420 bytes[i] = reg_slice[i] as u8;
1421 }
1422 state.regs[reg] = u32::from_le_bytes(bytes);
1423 }
1424 state
1425 }
1426}
1427
impl From<&PitState> for kvm_pit_state2 {
    /// Converts all three PIT channels; reserved fields stay zeroed.
    fn from(item: &PitState) -> Self {
        kvm_pit_state2 {
            channels: [
                kvm_pit_channel_state::from(&item.channels[0]),
                kvm_pit_channel_state::from(&item.channels[1]),
                kvm_pit_channel_state::from(&item.channels[2]),
            ],
            flags: item.flags,
            ..Default::default()
        }
    }
}
1441
impl From<&kvm_pit_state2> for PitState {
    /// Converts all three PIT channels; KVM's reserved fields are dropped.
    fn from(item: &kvm_pit_state2) -> Self {
        PitState {
            channels: [
                PitChannelState::from(&item.channels[0]),
                PitChannelState::from(&item.channels[1]),
                PitChannelState::from(&item.channels[2]),
            ],
            flags: item.flags,
        }
    }
}
1454
impl From<&PitChannelState> for kvm_pit_channel_state {
    fn from(item: &PitChannelState) -> Self {
        kvm_pit_channel_state {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched as u8,
            status_latched: item.status_latched as u8,
            status: item.status,
            read_state: item.read_state as u8,
            write_state: item.write_state as u8,
            // NOTE(review): reload_value (u16) is truncated to KVM's u8 write_latch;
            // the reverse conversion widens write_latch back — confirm the high byte
            // is never meaningful here.
            write_latch: item.reload_value as u8,
            rw_mode: item.rw_mode as u8,
            mode: item.mode,
            bcd: item.bcd as u8,
            gate: item.gate as u8,
            count_load_time: item.count_load_time as i64,
        }
    }
}
1475
impl From<&kvm_pit_channel_state> for PitChannelState {
    fn from(item: &kvm_pit_channel_state) -> Self {
        PitChannelState {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched.into(),
            status_latched: item.status_latched != 0,
            status: item.status,
            read_state: item.read_state.into(),
            write_state: item.write_state.into(),
            // KVM's u8 write_latch widens losslessly into our u16 reload_value.
            reload_value: item.write_latch as u16,
            rw_mode: item.rw_mode.into(),
            mode: item.mode,
            bcd: item.bcd != 0,
            gate: item.gate != 0,
            count_load_time: item.count_load_time as u64,
        }
    }
}
1496
/// Maps an `IrqSourceChip` to the KVM irqchip id used in routing entries.
/// Non-x86 chips are logged and mapped to 0 rather than failing the caller.
pub(super) fn chip_to_kvm_chip(chip: IrqSourceChip) -> u32 {
    match chip {
        IrqSourceChip::PicPrimary => KVM_IRQCHIP_PIC_MASTER,
        IrqSourceChip::PicSecondary => KVM_IRQCHIP_PIC_SLAVE,
        IrqSourceChip::Ioapic => KVM_IRQCHIP_IOAPIC,
        _ => {
            error!("Invalid IrqChipSource for X86 {:?}", chip);
            0
        }
    }
}
1511
1512impl From<&kvm_regs> for Regs {
1513 fn from(r: &kvm_regs) -> Self {
1514 Regs {
1515 rax: r.rax,
1516 rbx: r.rbx,
1517 rcx: r.rcx,
1518 rdx: r.rdx,
1519 rsi: r.rsi,
1520 rdi: r.rdi,
1521 rsp: r.rsp,
1522 rbp: r.rbp,
1523 r8: r.r8,
1524 r9: r.r9,
1525 r10: r.r10,
1526 r11: r.r11,
1527 r12: r.r12,
1528 r13: r.r13,
1529 r14: r.r14,
1530 r15: r.r15,
1531 rip: r.rip,
1532 rflags: r.rflags,
1533 }
1534 }
1535}
1536
1537impl From<&Regs> for kvm_regs {
1538 fn from(r: &Regs) -> Self {
1539 kvm_regs {
1540 rax: r.rax,
1541 rbx: r.rbx,
1542 rcx: r.rcx,
1543 rdx: r.rdx,
1544 rsi: r.rsi,
1545 rdi: r.rdi,
1546 rsp: r.rsp,
1547 rbp: r.rbp,
1548 r8: r.r8,
1549 r9: r.r9,
1550 r10: r.r10,
1551 r11: r.r11,
1552 r12: r.r12,
1553 r13: r.r13,
1554 r14: r.r14,
1555 r15: r.r15,
1556 rip: r.rip,
1557 rflags: r.rflags,
1558 }
1559 }
1560}
1561
1562impl From<&VcpuEvents> for kvm_vcpu_events {
1563 fn from(ve: &VcpuEvents) -> Self {
1564 let mut kvm_ve: kvm_vcpu_events = Default::default();
1565
1566 kvm_ve.exception.injected = ve.exception.injected as u8;
1567 kvm_ve.exception.nr = ve.exception.nr;
1568 kvm_ve.exception.has_error_code = ve.exception.has_error_code as u8;
1569 if let Some(pending) = ve.exception.pending {
1570 kvm_ve.exception.pending = pending as u8;
1571 if ve.exception_payload.is_some() {
1572 kvm_ve.exception_has_payload = true as u8;
1573 }
1574 kvm_ve.exception_payload = ve.exception_payload.unwrap_or(0);
1575 kvm_ve.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
1576 }
1577 kvm_ve.exception.error_code = ve.exception.error_code;
1578
1579 kvm_ve.interrupt.injected = ve.interrupt.injected as u8;
1580 kvm_ve.interrupt.nr = ve.interrupt.nr;
1581 kvm_ve.interrupt.soft = ve.interrupt.soft as u8;
1582 if let Some(shadow) = ve.interrupt.shadow {
1583 kvm_ve.interrupt.shadow = shadow;
1584 kvm_ve.flags |= KVM_VCPUEVENT_VALID_SHADOW;
1585 }
1586
1587 kvm_ve.nmi.injected = ve.nmi.injected as u8;
1588 if let Some(pending) = ve.nmi.pending {
1589 kvm_ve.nmi.pending = pending as u8;
1590 kvm_ve.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
1591 }
1592 kvm_ve.nmi.masked = ve.nmi.masked as u8;
1593
1594 if let Some(sipi_vector) = ve.sipi_vector {
1595 kvm_ve.sipi_vector = sipi_vector;
1596 kvm_ve.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
1597 }
1598
1599 if let Some(smm) = ve.smi.smm {
1600 kvm_ve.smi.smm = smm as u8;
1601 kvm_ve.flags |= KVM_VCPUEVENT_VALID_SMM;
1602 }
1603 kvm_ve.smi.pending = ve.smi.pending as u8;
1604 kvm_ve.smi.smm_inside_nmi = ve.smi.smm_inside_nmi as u8;
1605 kvm_ve.smi.latched_init = ve.smi.latched_init;
1606
1607 if let Some(pending) = ve.triple_fault.pending {
1608 kvm_ve.triple_fault.pending = pending as u8;
1609 kvm_ve.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
1610 }
1611 kvm_ve
1612 }
1613}
1614
1615impl From<&kvm_vcpu_events> for VcpuEvents {
1616 fn from(ve: &kvm_vcpu_events) -> Self {
1617 let exception = VcpuExceptionState {
1618 injected: ve.exception.injected != 0,
1619 nr: ve.exception.nr,
1620 has_error_code: ve.exception.has_error_code != 0,
1621 pending: if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
1622 Some(ve.exception.pending != 0)
1623 } else {
1624 None
1625 },
1626 error_code: ve.exception.error_code,
1627 };
1628
1629 let interrupt = VcpuInterruptState {
1630 injected: ve.interrupt.injected != 0,
1631 nr: ve.interrupt.nr,
1632 soft: ve.interrupt.soft != 0,
1633 shadow: if ve.flags & KVM_VCPUEVENT_VALID_SHADOW != 0 {
1634 Some(ve.interrupt.shadow)
1635 } else {
1636 None
1637 },
1638 };
1639
1640 let nmi = VcpuNmiState {
1641 injected: ve.interrupt.injected != 0,
1642 pending: if ve.flags & KVM_VCPUEVENT_VALID_NMI_PENDING != 0 {
1643 Some(ve.nmi.pending != 0)
1644 } else {
1645 None
1646 },
1647 masked: ve.nmi.masked != 0,
1648 };
1649
1650 let sipi_vector = if ve.flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR != 0 {
1651 Some(ve.sipi_vector)
1652 } else {
1653 None
1654 };
1655
1656 let smi = VcpuSmiState {
1657 smm: if ve.flags & KVM_VCPUEVENT_VALID_SMM != 0 {
1658 Some(ve.smi.smm != 0)
1659 } else {
1660 None
1661 },
1662 pending: ve.smi.pending != 0,
1663 smm_inside_nmi: ve.smi.smm_inside_nmi != 0,
1664 latched_init: ve.smi.latched_init,
1665 };
1666
1667 let triple_fault = VcpuTripleFaultState {
1668 pending: if ve.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT != 0 {
1669 Some(ve.triple_fault.pending != 0)
1670 } else {
1671 None
1672 },
1673 };
1674
1675 let exception_payload = if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
1676 Some(ve.exception_payload)
1677 } else {
1678 None
1679 };
1680
1681 VcpuEvents {
1682 exception,
1683 interrupt,
1684 nmi,
1685 sipi_vector,
1686 smi,
1687 triple_fault,
1688 exception_payload,
1689 }
1690 }
1691}
1692
1693impl From<&kvm_segment> for Segment {
1694 fn from(s: &kvm_segment) -> Self {
1695 Segment {
1696 base: s.base,
1697 limit_bytes: s.limit,
1698 selector: s.selector,
1699 type_: s.type_,
1700 present: s.present,
1701 dpl: s.dpl,
1702 db: s.db,
1703 s: s.s,
1704 l: s.l,
1705 g: s.g,
1706 avl: s.avl,
1707 }
1708 }
1709}
1710
1711impl From<&Segment> for kvm_segment {
1712 fn from(s: &Segment) -> Self {
1713 kvm_segment {
1714 base: s.base,
1715 limit: s.limit_bytes,
1716 selector: s.selector,
1717 type_: s.type_,
1718 present: s.present,
1719 dpl: s.dpl,
1720 db: s.db,
1721 s: s.s,
1722 l: s.l,
1723 g: s.g,
1724 avl: s.avl,
1725 unusable: match s.present {
1726 0 => 1,
1727 _ => 0,
1728 },
1729 ..Default::default()
1730 }
1731 }
1732}
1733
1734impl From<&kvm_dtable> for DescriptorTable {
1735 fn from(dt: &kvm_dtable) -> Self {
1736 DescriptorTable {
1737 base: dt.base,
1738 limit: dt.limit,
1739 }
1740 }
1741}
1742
1743impl From<&DescriptorTable> for kvm_dtable {
1744 fn from(dt: &DescriptorTable) -> Self {
1745 kvm_dtable {
1746 base: dt.base,
1747 limit: dt.limit,
1748 ..Default::default()
1749 }
1750 }
1751}
1752
1753impl From<&kvm_sregs> for Sregs {
1754 fn from(r: &kvm_sregs) -> Self {
1755 Sregs {
1756 cs: Segment::from(&r.cs),
1757 ds: Segment::from(&r.ds),
1758 es: Segment::from(&r.es),
1759 fs: Segment::from(&r.fs),
1760 gs: Segment::from(&r.gs),
1761 ss: Segment::from(&r.ss),
1762 tr: Segment::from(&r.tr),
1763 ldt: Segment::from(&r.ldt),
1764 gdt: DescriptorTable::from(&r.gdt),
1765 idt: DescriptorTable::from(&r.idt),
1766 cr0: r.cr0,
1767 cr2: r.cr2,
1768 cr3: r.cr3,
1769 cr4: r.cr4,
1770 cr8: r.cr8,
1771 efer: r.efer,
1772 }
1773 }
1774}
1775
1776impl From<&kvm_fpu> for Fpu {
1777 fn from(r: &kvm_fpu) -> Self {
1778 Fpu {
1779 fpr: FpuReg::from_16byte_arrays(&r.fpr),
1780 fcw: r.fcw,
1781 fsw: r.fsw,
1782 ftwx: r.ftwx,
1783 last_opcode: r.last_opcode,
1784 last_ip: r.last_ip,
1785 last_dp: r.last_dp,
1786 xmm: r.xmm,
1787 mxcsr: r.mxcsr,
1788 }
1789 }
1790}
1791
1792impl From<&Fpu> for kvm_fpu {
1793 fn from(r: &Fpu) -> Self {
1794 kvm_fpu {
1795 fpr: FpuReg::to_16byte_arrays(&r.fpr),
1796 fcw: r.fcw,
1797 fsw: r.fsw,
1798 ftwx: r.ftwx,
1799 last_opcode: r.last_opcode,
1800 last_ip: r.last_ip,
1801 last_dp: r.last_dp,
1802 xmm: r.xmm,
1803 mxcsr: r.mxcsr,
1804 ..Default::default()
1805 }
1806 }
1807}
1808
1809impl From<&kvm_debugregs> for DebugRegs {
1810 fn from(r: &kvm_debugregs) -> Self {
1811 DebugRegs {
1812 db: r.db,
1813 dr6: r.dr6,
1814 dr7: r.dr7,
1815 }
1816 }
1817}
1818
1819impl From<&DebugRegs> for kvm_debugregs {
1820 fn from(r: &DebugRegs) -> Self {
1821 kvm_debugregs {
1822 db: r.db,
1823 dr6: r.dr6,
1824 dr7: r.dr7,
1825 ..Default::default()
1826 }
1827 }
1828}
1829
#[cfg(test)]
mod tests {
    use super::*;

    /// Round-trips a populated `kvm_vcpu_events` through `VcpuEvents` and
    /// back, checking every field in both directions.
    ///
    /// `KVM_VCPUEVENT_VALID_TRIPLE_FAULT` is deliberately left out of
    /// `flags` to exercise the `None` path for `triple_fault.pending`.
    #[test]
    fn vcpu_event_to_from() {
        let mut kvm_ve: kvm_vcpu_events = Default::default();
        kvm_ve.exception.injected = 1;
        kvm_ve.exception.nr = 65;
        kvm_ve.exception.has_error_code = 1;
        kvm_ve.exception.error_code = 110;
        kvm_ve.exception.pending = 1;

        kvm_ve.interrupt.injected = 1;
        kvm_ve.interrupt.nr = 100;
        kvm_ve.interrupt.soft = 1;
        kvm_ve.interrupt.shadow = 114;

        // NOTE(review): nmi.injected holds the same value as
        // interrupt.injected, so a mix-up between those two fields in the
        // conversion code would not be caught by this test.
        kvm_ve.nmi.injected = 1;
        kvm_ve.nmi.pending = 1;
        kvm_ve.nmi.masked = 0;

        kvm_ve.sipi_vector = 105;

        kvm_ve.smi.smm = 1;
        kvm_ve.smi.pending = 1;
        kvm_ve.smi.smm_inside_nmi = 1;
        kvm_ve.smi.latched_init = 100;

        kvm_ve.triple_fault.pending = 0;

        kvm_ve.exception_payload = 33;
        kvm_ve.exception_has_payload = 1;

        kvm_ve.flags = KVM_VCPUEVENT_VALID_PAYLOAD
            | KVM_VCPUEVENT_VALID_SMM
            | KVM_VCPUEVENT_VALID_NMI_PENDING
            | KVM_VCPUEVENT_VALID_SIPI_VECTOR
            | KVM_VCPUEVENT_VALID_SHADOW;

        // KVM -> hypervisor-agnostic direction.
        let ve: VcpuEvents = VcpuEvents::from(&kvm_ve);
        assert!(ve.exception.injected);
        assert_eq!(ve.exception.nr, 65);
        assert!(ve.exception.has_error_code);
        assert_eq!(ve.exception.error_code, 110);
        assert_eq!(ve.exception.pending, Some(true));

        assert!(ve.interrupt.injected);
        assert_eq!(ve.interrupt.nr, 100);
        assert!(ve.interrupt.soft);
        assert_eq!(ve.interrupt.shadow, Some(114));

        assert!(ve.nmi.injected);
        assert_eq!(ve.nmi.pending, Some(true));
        assert!(!ve.nmi.masked);

        assert_eq!(ve.sipi_vector, Some(105));

        assert_eq!(ve.smi.smm, Some(true));
        assert!(ve.smi.pending);
        assert!(ve.smi.smm_inside_nmi);
        assert_eq!(ve.smi.latched_init, 100);

        // VALID_TRIPLE_FAULT was not set, so the value must be dropped.
        assert_eq!(ve.triple_fault.pending, None);

        assert_eq!(ve.exception_payload, Some(33));

        // Hypervisor-agnostic -> KVM direction (round trip).
        let kvm_ve_restored: kvm_vcpu_events = kvm_vcpu_events::from(&ve);
        assert_eq!(kvm_ve_restored.exception.injected, 1);
        assert_eq!(kvm_ve_restored.exception.nr, 65);
        assert_eq!(kvm_ve_restored.exception.has_error_code, 1);
        assert_eq!(kvm_ve_restored.exception.error_code, 110);
        assert_eq!(kvm_ve_restored.exception.pending, 1);

        assert_eq!(kvm_ve_restored.interrupt.injected, 1);
        assert_eq!(kvm_ve_restored.interrupt.nr, 100);
        assert_eq!(kvm_ve_restored.interrupt.soft, 1);
        assert_eq!(kvm_ve_restored.interrupt.shadow, 114);

        assert_eq!(kvm_ve_restored.nmi.injected, 1);
        assert_eq!(kvm_ve_restored.nmi.pending, 1);
        assert_eq!(kvm_ve_restored.nmi.masked, 0);

        assert_eq!(kvm_ve_restored.sipi_vector, 105);

        assert_eq!(kvm_ve_restored.smi.smm, 1);
        assert_eq!(kvm_ve_restored.smi.pending, 1);
        assert_eq!(kvm_ve_restored.smi.smm_inside_nmi, 1);
        assert_eq!(kvm_ve_restored.smi.latched_init, 100);

        assert_eq!(kvm_ve_restored.triple_fault.pending, 0);

        assert_eq!(kvm_ve_restored.exception_payload, 33);
        assert_eq!(kvm_ve_restored.exception_has_payload, 1);
    }
}