1use std::arch::x86_64::CpuidResult;
6use std::collections::BTreeMap;
7
8use base::errno_result;
9use base::error;
10use base::ioctl;
11use base::ioctl_with_mut_ptr;
12use base::ioctl_with_mut_ref;
13use base::ioctl_with_ptr;
14use base::ioctl_with_ref;
15use base::ioctl_with_val;
16use base::AsRawDescriptor;
17use base::Error;
18use base::IoctlNr;
19use base::MappedRegion;
20use base::Result;
21use kvm_sys::*;
22use libc::E2BIG;
23use libc::EAGAIN;
24use libc::EINVAL;
25use libc::EIO;
26use libc::ENOMEM;
27use libc::ENXIO;
28use serde::Deserialize;
29use serde::Serialize;
30use snapshot::AnySnapshot;
31use vm_memory::GuestAddress;
32use zerocopy::FromZeros;
33
34use super::Config;
35use super::Kvm;
36use super::KvmCap;
37use super::KvmVcpu;
38use super::KvmVm;
39use crate::host_phys_addr_bits;
40use crate::ClockState;
41use crate::CpuId;
42use crate::CpuIdEntry;
43use crate::DebugRegs;
44use crate::DescriptorTable;
45use crate::DeviceKind;
46use crate::Fpu;
47use crate::FpuReg;
48use crate::HypervisorX86_64;
49use crate::IoapicRedirectionTableEntry;
50use crate::IoapicState;
51use crate::IrqSourceChip;
52use crate::LapicState;
53use crate::PicSelect;
54use crate::PicState;
55use crate::PitChannelState;
56use crate::PitState;
57use crate::ProtectionType;
58use crate::Regs;
59use crate::Segment;
60use crate::Sregs;
61use crate::VcpuExit;
62use crate::VcpuX86_64;
63use crate::VmCap;
64use crate::VmX86_64;
65use crate::Xsave;
66use crate::NUM_IOAPIC_PINS;
67
// The legacy KVM_GET_XSAVE/KVM_SET_XSAVE interface uses a fixed 4 KiB buffer;
// anything larger requires the KVM_GET_XSAVE2 interface (see `xsave_size`).
const KVM_XSAVE_MAX_SIZE: usize = 4096;
// MSR number of IA32_APICBASE (local APIC base address / enable bits).
const MSR_IA32_APICBASE: u32 = 0x0000001b;
70
/// Serializable snapshot of a vCPU's pending/injected event state, mirroring
/// the kernel's `kvm_vcpu_events` structure for snapshot/restore.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuEvents {
    pub exception: VcpuExceptionState,
    pub interrupt: VcpuInterruptState,
    pub nmi: VcpuNmiState,
    // NOTE(review): `Option` fields in this family of structs presumably
    // model values that are only valid when the kernel sets the matching
    // KVM_VCPUEVENT_VALID_* flag — confirm against the From conversions.
    pub sipi_vector: Option<u32>,
    pub smi: VcpuSmiState,
    pub triple_fault: VcpuTripleFaultState,
    // Exception payload (set alongside a pending exception; see the
    // From<&VcpuEvents> conversion which gates it on `exception.pending`).
    pub exception_payload: Option<u64>,
}
81
/// Exception portion of [`VcpuEvents`], mirroring `kvm_vcpu_events.exception`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuExceptionState {
    pub injected: bool,
    // Exception vector number.
    pub nr: u8,
    pub has_error_code: bool,
    pub pending: Option<bool>,
    pub error_code: u32,
}
90
/// Interrupt portion of [`VcpuEvents`], mirroring `kvm_vcpu_events.interrupt`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuInterruptState {
    pub injected: bool,
    // Interrupt vector number.
    pub nr: u8,
    // True for a software interrupt.
    pub soft: bool,
    pub shadow: Option<u8>,
}
98
/// NMI portion of [`VcpuEvents`], mirroring `kvm_vcpu_events.nmi`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuNmiState {
    pub injected: bool,
    pub pending: Option<bool>,
    pub masked: bool,
}
105
/// SMI portion of [`VcpuEvents`], mirroring `kvm_vcpu_events.smi`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuSmiState {
    // Whether the vCPU is currently in System Management Mode.
    pub smm: Option<bool>,
    pub pending: bool,
    pub smm_inside_nmi: bool,
    pub latched_init: u8,
}
113
/// Triple-fault portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuTripleFaultState {
    pub pending: Option<bool>,
}
118
/// Fetches a CPUID list from `descriptor` via the `kind` ioctl (e.g.
/// `KVM_GET_SUPPORTED_CPUID`), starting with room for `initial_capacity`
/// entries and doubling the buffer on `E2BIG` until the full list fits.
pub fn get_cpuid_with_initial_capacity<T: AsRawDescriptor>(
    descriptor: &T,
    kind: IoctlNr,
    initial_capacity: usize,
) -> Result<CpuId> {
    let mut entries: usize = initial_capacity;

    loop {
        // Allocate a zeroed, flexible-array `kvm_cpuid2` with `entries` slots
        // and advertise that capacity to the kernel via `nent`.
        let mut kvm_cpuid =
            kvm_cpuid2::<[kvm_cpuid_entry2]>::new_box_zeroed_with_elems(entries).unwrap();
        kvm_cpuid.nent = entries.try_into().unwrap();

        let ret = {
            // SAFETY: the buffer was allocated with capacity for `nent`
            // entries, which bounds what the kernel may write.
            unsafe { ioctl_with_mut_ref(descriptor, kind, &mut *kvm_cpuid) }
        };
        if ret < 0 {
            let err = Error::last();
            match err.errno() {
                E2BIG => {
                    // Buffer too small: double the capacity and retry,
                    // failing if the multiplication would overflow.
                    if let Some(val) = entries.checked_mul(2) {
                        entries = val;
                    } else {
                        return Err(err);
                    }
                }
                _ => return Err(err),
            }
        } else {
            // On success the kernel has updated `nent` to the number of
            // valid entries; the conversion only reads that many.
            return Ok(CpuId::from(&*kvm_cpuid));
        }
    }
}
156
157impl Kvm {
158 pub fn get_cpuid(&self, kind: IoctlNr) -> Result<CpuId> {
159 const KVM_MAX_ENTRIES: usize = 256;
160 get_cpuid_with_initial_capacity(self, kind, KVM_MAX_ENTRIES)
161 }
162
163 pub fn get_vm_type(&self, protection_type: ProtectionType) -> Result<u32> {
164 if protection_type.isolates_memory() {
165 Ok(KVM_X86_PKVM_PROTECTED_VM)
166 } else {
167 Ok(KVM_X86_DEFAULT_VM)
168 }
169 }
170
171 pub fn get_guest_phys_addr_bits(&self) -> u8 {
173 host_phys_addr_bits()
175 }
176}
177
impl HypervisorX86_64 for Kvm {
    /// Returns the CPUID features supported by this host's KVM.
    fn get_supported_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_SUPPORTED_CPUID)
    }

    /// Returns the list of MSR indices KVM allows getting/setting,
    /// truncated to at most 256 entries.
    fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        const MAX_KVM_MSR_ENTRIES: usize = 256;

        let mut msr_list = kvm_msr_list::<[u32; MAX_KVM_MSR_ENTRIES]>::new_zeroed();
        msr_list.nmsrs = MAX_KVM_MSR_ENTRIES as u32;

        let ret = {
            // SAFETY: the buffer has capacity for `nmsrs` indices, which
            // bounds what the kernel may write.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST, &mut msr_list) }
        };
        if ret < 0 {
            return errno_result();
        }

        // Defensive clamp: never read past the array even if the kernel
        // reported a larger count than we provided room for.
        let mut nmsrs = msr_list.nmsrs;
        if nmsrs > MAX_KVM_MSR_ENTRIES as u32 {
            nmsrs = MAX_KVM_MSR_ENTRIES as u32;
        }

        Ok(msr_list.indices[..nmsrs as usize].to_vec())
    }
}
208
impl KvmVm {
    /// Arch-specific VM initialization; nothing is required on x86_64.
    pub fn init_arch(&self, _cfg: &Config) -> Result<()> {
        Ok(())
    }

    /// Arch-specific capability check. PvClock is always available on x86
    /// KVM; `None` defers to the common-code check.
    pub fn check_capability_arch(&self, c: VmCap) -> Option<bool> {
        match c {
            VmCap::PvClock => Some(true),
            _ => None,
        }
    }

    /// No device kinds need arch-specific `KVM_CREATE_DEVICE` parameters on
    /// x86_64.
    pub fn get_device_params_arch(&self, _kind: DeviceKind) -> Option<kvm_create_device> {
        None
    }

    /// Reads the VM's paravirtual clock via `KVM_GET_CLOCK`.
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        let mut clock_data: kvm_clock_data = Default::default();
        // SAFETY: we own `clock_data` and the ioctl writes only into it.
        let ret =
            unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK, &mut clock_data) };
        if ret == 0 {
            Ok(ClockState::from(&clock_data))
        } else {
            errno_result()
        }
    }

    /// Sets the VM's paravirtual clock via `KVM_SET_CLOCK`.
    pub fn set_pvclock_arch(&self, state: &ClockState) -> Result<()> {
        let clock_data = kvm_clock_data::from(state);
        // SAFETY: the ioctl only reads the provided struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK, &clock_data) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the state of one of the in-kernel PICs via `KVM_GET_IRQCHIP`.
    pub fn get_pic_state(&self, id: PicSelect) -> Result<kvm_pic_state> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        let ret = {
            // SAFETY: we own `irqchip_state` and the ioctl writes only into it.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY: `chip_id` selected a PIC, so the kernel filled the
                // `pic` member of the union.
                unsafe { irqchip_state.chip.pic },
            )
        } else {
            errno_result()
        }
    }

    /// Writes the state of one of the in-kernel PICs via `KVM_SET_IRQCHIP`.
    pub fn set_pic_state(&self, id: PicSelect, state: &kvm_pic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        irqchip_state.chip.pic = *state;
        // SAFETY: the ioctl only reads the provided struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// The in-kernel IOAPIC always exposes the architectural pin count.
    pub fn get_ioapic_num_pins(&self) -> Result<usize> {
        Ok(NUM_IOAPIC_PINS)
    }

    /// Reads the in-kernel IOAPIC state via `KVM_GET_IRQCHIP`.
    pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> {
        // chip_id 2 selects the IOAPIC (0 and 1 are the two PICs).
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        let ret = {
            // SAFETY: we own `irqchip_state` and the ioctl writes only into it.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY: `chip_id` selected the IOAPIC, so the kernel filled
                // the `ioapic` member of the union.
                unsafe { irqchip_state.chip.ioapic },
            )
        } else {
            errno_result()
        }
    }

    /// Writes the in-kernel IOAPIC state via `KVM_SET_IRQCHIP`.
    pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> {
        // chip_id 2 selects the IOAPIC (0 and 1 are the two PICs).
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        irqchip_state.chip.ioapic = *state;
        // SAFETY: the ioctl only reads the provided struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates the in-kernel PIT with default configuration.
    pub fn create_pit(&self) -> Result<()> {
        let pit_config = kvm_pit_config::default();
        // SAFETY: the ioctl only reads the provided struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2, &pit_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the in-kernel PIT state via `KVM_GET_PIT2`.
    pub fn get_pit_state(&self) -> Result<kvm_pit_state2> {
        let mut pit_state = Default::default();
        // SAFETY: we own `pit_state` and the ioctl writes only into it.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2, &mut pit_state) };
        if ret == 0 {
            Ok(pit_state)
        } else {
            errno_result()
        }
    }

    /// Writes the in-kernel PIT state via `KVM_SET_PIT2`.
    pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> {
        // SAFETY: the ioctl only reads the provided struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2, pit_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Toggles guest read access to MSR_PLATFORM_INFO via the
    /// KVM_CAP_MSR_PLATFORM_INFO capability.
    pub fn set_platform_info_read_access(&self, allow_read: bool) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_MSR_PLATFORM_INFO,
            ..Default::default()
        };
        cap.args[0] = allow_read as u64;

        // SAFETY: the ioctl only reads the provided struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Enables the split irqchip (in-kernel LAPIC, userspace PIC/IOAPIC)
    /// with the given number of IOAPIC pins.
    pub fn enable_split_irqchip(&self, ioapic_pins: usize) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_SPLIT_IRQCHIP,
            ..Default::default()
        };
        cap.args[0] = ioapic_pins as u64;
        // SAFETY: the ioctl only reads the provided struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Queries the protected-VM info (firmware size) from the hypervisor.
    fn get_protected_vm_info(&self) -> Result<KvmProtectedVmInfo> {
        let mut info = KvmProtectedVmInfo {
            firmware_size: 0,
            reserved: [0; 7],
        };
        // SAFETY: `info` outlives the call; the first arg is a pointer to it
        // that the hypervisor fills in.
        unsafe {
            self.enable_raw_capability(
                KvmCap::X86ProtectedVm,
                KVM_CAP_X86_PROTECTED_VM_FLAGS_INFO,
                &[&mut info as *mut KvmProtectedVmInfo as u64, 0, 0, 0],
            )
        }?;
        Ok(info)
    }

    /// Tells the hypervisor the guest-physical address at which to place
    /// protected-VM firmware.
    fn set_protected_vm_firmware_gpa(&self, fw_addr: GuestAddress) -> Result<()> {
        // SAFETY: only plain integer arguments are passed to the capability.
        unsafe {
            self.enable_raw_capability(
                KvmCap::X86ProtectedVm,
                KVM_CAP_X86_PROTECTED_VM_FLAGS_SET_FW_GPA,
                &[fw_addr.0, 0, 0, 0],
            )
        }
    }
}
477
/// Layout-compatible mirror of the kernel's protected-VM info structure,
/// filled by the `KVM_CAP_X86_PROTECTED_VM_FLAGS_INFO` capability call.
#[repr(C)]
struct KvmProtectedVmInfo {
    firmware_size: u64,
    reserved: [u64; 7],
}
483
impl VmX86_64 for KvmVm {
    fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
        &self.kvm
    }

    /// Validates the hypervisor-reported firmware size against `fw_max_size`
    /// and, if it fits, registers `fw_addr` as the firmware load address.
    fn load_protected_vm_firmware(
        &mut self,
        fw_addr: GuestAddress,
        fw_max_size: u64,
    ) -> Result<()> {
        let info = self.get_protected_vm_info()?;
        if info.firmware_size == 0 {
            // Zero size means no protected-VM firmware is available.
            Err(Error::new(EINVAL))
        } else {
            if info.firmware_size > fw_max_size {
                return Err(Error::new(ENOMEM));
            }
            self.set_protected_vm_firmware_gpa(fw_addr)
        }
    }

    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
        Ok(Box::new(KvmVm::create_kvm_vcpu(self, id)?))
    }

    /// Sets the guest-physical address of the three-page TSS region used by
    /// KVM via `KVM_SET_TSS_ADDR`.
    fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> {
        // SAFETY: the address is passed by value; the kernel reads nothing
        // from our memory.
        let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR, addr.offset()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the guest-physical address of the identity-map page via
    /// `KVM_SET_IDENTITY_MAP_ADDR`.
    fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> {
        // SAFETY: the ioctl only reads the provided u64.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR, &addr.offset()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}
539
impl KvmVcpu {
    /// A system-event reset exit needs no arch-specific handling on x86_64;
    /// it is simply reported to the caller.
    pub fn system_event_reset(&self, _event_flags: u64) -> Result<VcpuExit> {
        Ok(VcpuExit::SystemEventReset)
    }

    /// Returns the XSAVE buffer size in bytes for this VM.
    ///
    /// `KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)` returns the size needed by
    /// `KVM_GET_XSAVE2` (0 when the extension is unsupported); the legacy
    /// interface always uses a fixed 4 KiB buffer, hence the `max`.
    fn xsave_size(&self) -> Result<usize> {
        let size = {
            // SAFETY: KVM_CHECK_EXTENSION takes a plain value argument and
            // writes nothing to our memory.
            unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2 as u64) }
        };
        if size < 0 {
            return errno_result();
        }
        let size: usize = size.try_into().unwrap();
        Ok(size.max(KVM_XSAVE_MAX_SIZE))
    }

    /// Translates x86-specific KVM exit reasons into [`VcpuExit`] values;
    /// returns `None` for reasons the common code should handle.
    #[inline]
    pub(crate) fn handle_vm_exit_arch(&self, run: &mut kvm_run) -> Option<VcpuExit> {
        match run.exit_reason {
            KVM_EXIT_IO => Some(VcpuExit::Io),
            KVM_EXIT_IOAPIC_EOI => {
                // SAFETY: the exit reason guarantees the `eoi` union member
                // is the one the kernel populated.
                let vector = unsafe { run.__bindgen_anon_1.eoi.vector };
                Some(VcpuExit::IoapicEoi { vector })
            }
            KVM_EXIT_HLT => Some(VcpuExit::Hlt),
            KVM_EXIT_SET_TPR => Some(VcpuExit::SetTpr),
            KVM_EXIT_TPR_ACCESS => Some(VcpuExit::TprAccess),
            KVM_EXIT_X86_BUS_LOCK => Some(VcpuExit::BusLock),
            _ => None,
        }
    }
}
586
/// KVM-specific vCPU state captured by `get_hypervisor_specific_state`:
/// event state plus the raw `KVM_GET_NESTED_STATE` blob (empty when nested
/// state is unsupported).
#[derive(Debug, Serialize, Deserialize)]
struct HypervisorState {
    interrupts: VcpuEvents,
    nested_state: Vec<u8>,
}
592
593impl VcpuX86_64 for KvmVcpu {
594 #[allow(clippy::cast_ptr_alignment)]
595 fn set_interrupt_window_requested(&self, requested: bool) {
596 let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
601 run.request_interrupt_window = requested.into();
602 }
603
604 #[allow(clippy::cast_ptr_alignment)]
605 fn ready_for_interrupt(&self) -> bool {
606 let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
611 run.ready_for_interrupt_injection != 0 && run.if_flag != 0
612 }
613
614 fn interrupt(&self, irq: u8) -> Result<()> {
619 if !self.ready_for_interrupt() {
620 return Err(Error::new(EAGAIN));
621 }
622
623 let interrupt = kvm_interrupt { irq: irq.into() };
624 let ret = unsafe { ioctl_with_ref(self, KVM_INTERRUPT, &interrupt) };
628 if ret == 0 {
629 Ok(())
630 } else {
631 errno_result()
632 }
633 }
634
635 fn inject_nmi(&self) -> Result<()> {
636 let ret = unsafe { ioctl(self, KVM_NMI) };
639 if ret == 0 {
640 Ok(())
641 } else {
642 errno_result()
643 }
644 }
645
646 fn get_regs(&self) -> Result<Regs> {
647 let mut regs: kvm_regs = Default::default();
648 let ret = {
649 unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS, &mut regs) }
654 };
655 if ret == 0 {
656 Ok(Regs::from(®s))
657 } else {
658 errno_result()
659 }
660 }
661
662 fn set_regs(&self, regs: &Regs) -> Result<()> {
663 let regs = kvm_regs::from(regs);
664 let ret = {
665 unsafe { ioctl_with_ref(self, KVM_SET_REGS, ®s) }
670 };
671 if ret == 0 {
672 Ok(())
673 } else {
674 errno_result()
675 }
676 }
677
678 fn get_sregs(&self) -> Result<Sregs> {
679 let mut regs: kvm_sregs = Default::default();
680 let ret = {
681 unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) }
686 };
687 if ret == 0 {
688 Ok(Sregs::from(®s))
689 } else {
690 errno_result()
691 }
692 }
693
694 fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
695 let mut kvm_sregs: kvm_sregs = Default::default();
698 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut kvm_sregs) };
702 if ret != 0 {
703 return errno_result();
704 }
705
706 kvm_sregs.cs = kvm_segment::from(&sregs.cs);
707 kvm_sregs.ds = kvm_segment::from(&sregs.ds);
708 kvm_sregs.es = kvm_segment::from(&sregs.es);
709 kvm_sregs.fs = kvm_segment::from(&sregs.fs);
710 kvm_sregs.gs = kvm_segment::from(&sregs.gs);
711 kvm_sregs.ss = kvm_segment::from(&sregs.ss);
712 kvm_sregs.tr = kvm_segment::from(&sregs.tr);
713 kvm_sregs.ldt = kvm_segment::from(&sregs.ldt);
714 kvm_sregs.gdt = kvm_dtable::from(&sregs.gdt);
715 kvm_sregs.idt = kvm_dtable::from(&sregs.idt);
716 kvm_sregs.cr0 = sregs.cr0;
717 kvm_sregs.cr2 = sregs.cr2;
718 kvm_sregs.cr3 = sregs.cr3;
719 kvm_sregs.cr4 = sregs.cr4;
720 kvm_sregs.cr8 = sregs.cr8;
721 kvm_sregs.efer = sregs.efer;
722
723 let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &kvm_sregs) };
727 if ret == 0 {
728 Ok(())
729 } else {
730 errno_result()
731 }
732 }
733
734 fn get_fpu(&self) -> Result<Fpu> {
735 let mut fpu: kvm_fpu = Default::default();
736 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU, &mut fpu) };
740 if ret == 0 {
741 Ok(Fpu::from(&fpu))
742 } else {
743 errno_result()
744 }
745 }
746
747 fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
748 let fpu = kvm_fpu::from(fpu);
749 let ret = {
750 unsafe { ioctl_with_ref(self, KVM_SET_FPU, &fpu) }
753 };
754 if ret == 0 {
755 Ok(())
756 } else {
757 errno_result()
758 }
759 }
760
761 fn get_xsave(&self) -> Result<Xsave> {
763 let size = self.xsave_size()?;
764 let ioctl_nr = if size > KVM_XSAVE_MAX_SIZE {
765 KVM_GET_XSAVE2
766 } else {
767 KVM_GET_XSAVE
768 };
769 let mut xsave = Xsave::new(size);
770
771 let ret = unsafe { ioctl_with_mut_ptr(self, ioctl_nr, xsave.as_mut_ptr()) };
775 if ret == 0 {
776 Ok(xsave)
777 } else {
778 errno_result()
779 }
780 }
781
782 fn set_xsave(&self, xsave: &Xsave) -> Result<()> {
783 let size = self.xsave_size()?;
784 if xsave.len() != size {
787 return Err(Error::new(EIO));
788 }
789
790 let ret = unsafe { ioctl_with_ptr(self, KVM_SET_XSAVE, xsave.as_ptr()) };
796 if ret == 0 {
797 Ok(())
798 } else {
799 errno_result()
800 }
801 }
802
803 fn get_hypervisor_specific_state(&self) -> Result<AnySnapshot> {
804 let mut vcpu_evts: kvm_vcpu_events = Default::default();
805 let ret = { unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS, &mut vcpu_evts) } };
810 if ret != 0 {
811 return errno_result();
812 }
813 let interrupts = VcpuEvents::from(&vcpu_evts);
814 let ret =
815 unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE as u64) };
818 if ret < 0 {
819 return errno_result();
820 }
821 let nested_state = if ret == 0 {
823 Vec::new()
824 } else {
825 let mut nested_state: Vec<u8> = vec![0; ret as usize];
826 let nested_state_ptr = nested_state.as_ptr() as *mut kvm_nested_state;
827 assert!(nested_state_ptr.is_aligned());
828 unsafe {
836 (*nested_state_ptr).size = ret as u32;
837 }
838 assert!(nested_state.as_ptr().is_aligned());
839 let ret = unsafe {
843 ioctl_with_mut_ptr(self, KVM_GET_NESTED_STATE, nested_state.as_mut_ptr())
844 };
845 if ret < 0 {
846 return errno_result();
847 }
848 nested_state
849 };
850 AnySnapshot::to_any(HypervisorState {
851 interrupts,
852 nested_state,
853 })
854 .map_err(|e| {
855 error!("failed to serialize hypervisor state: {:?}", e);
856 Error::new(EIO)
857 })
858 }
859
860 fn set_hypervisor_specific_state(&self, data: AnySnapshot) -> Result<()> {
861 let hypervisor_state = AnySnapshot::from_any::<HypervisorState>(data).map_err(|e| {
862 error!("failed to deserialize hypervisor_state: {:?}", e);
863 Error::new(EIO)
864 })?;
865 let vcpu_events = kvm_vcpu_events::from(&hypervisor_state.interrupts);
866 let ret = {
867 unsafe { ioctl_with_ref(self, KVM_SET_VCPU_EVENTS, &vcpu_events) }
872 };
873 if ret != 0 {
874 return errno_result();
875 }
876 if hypervisor_state.nested_state.is_empty() {
877 return Ok(());
878 }
879 unsafe {
888 let vec_len = hypervisor_state.nested_state.len();
889 assert!(
890 (hypervisor_state.nested_state.as_ptr() as *const kvm_nested_state).is_aligned()
891 );
892 if (*(hypervisor_state.nested_state.as_ptr() as *const kvm_nested_state)).size
893 > vec_len as u32
894 {
895 error!("Invalued nested state data, size larger than vec allocated.");
896 return Err(Error::new(EINVAL));
897 }
898 }
899 let ret = unsafe {
904 ioctl_with_ptr(
905 self,
906 KVM_SET_NESTED_STATE,
907 hypervisor_state.nested_state.as_ptr(),
908 )
909 };
910 if ret == 0 {
911 Ok(())
912 } else {
913 errno_result()
914 }
915 }
916
917 fn get_debugregs(&self) -> Result<DebugRegs> {
918 let mut regs: kvm_debugregs = Default::default();
919 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS, &mut regs) };
923 if ret == 0 {
924 Ok(DebugRegs::from(®s))
925 } else {
926 errno_result()
927 }
928 }
929
930 fn set_debugregs(&self, dregs: &DebugRegs) -> Result<()> {
931 let dregs = kvm_debugregs::from(dregs);
932 let ret = {
933 unsafe { ioctl_with_ref(self, KVM_SET_DEBUGREGS, &dregs) }
936 };
937 if ret == 0 {
938 Ok(())
939 } else {
940 errno_result()
941 }
942 }
943
944 fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
945 let mut regs: kvm_xcrs = Default::default();
946 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS, &mut regs) };
950 if ret < 0 {
951 return errno_result();
952 }
953
954 Ok(regs
955 .xcrs
956 .iter()
957 .take(regs.nr_xcrs as usize)
958 .map(|kvm_xcr| (kvm_xcr.xcr, kvm_xcr.value))
959 .collect())
960 }
961
962 fn set_xcr(&self, xcr_index: u32, value: u64) -> Result<()> {
963 let mut kvm_xcr = kvm_xcrs {
964 nr_xcrs: 1,
965 ..Default::default()
966 };
967 kvm_xcr.xcrs[0].xcr = xcr_index;
968 kvm_xcr.xcrs[0].value = value;
969
970 let ret = {
971 unsafe { ioctl_with_ref(self, KVM_SET_XCRS, &kvm_xcr) }
974 };
975 if ret == 0 {
976 Ok(())
977 } else {
978 errno_result()
979 }
980 }
981
982 fn get_msr(&self, msr_index: u32) -> Result<u64> {
983 let mut msrs = kvm_msrs::<[kvm_msr_entry; 1]>::new_zeroed();
984 msrs.nmsrs = 1;
985 msrs.entries[0].index = msr_index;
986
987 let ret = {
988 unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut msrs) }
991 };
992 if ret < 0 {
993 return errno_result();
994 }
995
996 if ret != 1 {
998 return Err(base::Error::new(libc::ENOENT));
999 }
1000
1001 Ok(msrs.entries[0].data)
1002 }
1003
1004 fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
1005 let msr_index_list = self.kvm.get_msr_index_list()?;
1006
1007 let mut kvm_msrs =
1008 kvm_msrs::<[kvm_msr_entry]>::new_box_zeroed_with_elems(msr_index_list.len()).unwrap();
1009 kvm_msrs.nmsrs = msr_index_list.len() as u32;
1010 kvm_msrs
1011 .entries
1012 .iter_mut()
1013 .zip(msr_index_list.iter())
1014 .for_each(|(msr_entry, msr_index)| msr_entry.index = *msr_index);
1015
1016 let ret = {
1017 unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut *kvm_msrs) }
1020 };
1021 if ret < 0 {
1022 return errno_result();
1023 }
1024
1025 let count = ret as usize;
1027 if count != msr_index_list.len() {
1028 error!(
1029 "failed to get all MSRs: requested {}, got {}",
1030 msr_index_list.len(),
1031 count,
1032 );
1033 return Err(base::Error::new(libc::EPERM));
1034 }
1035
1036 let msrs = BTreeMap::from_iter(
1037 kvm_msrs
1038 .entries
1039 .iter()
1040 .map(|kvm_msr| (kvm_msr.index, kvm_msr.data)),
1041 );
1042
1043 Ok(msrs)
1044 }
1045
1046 fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
1047 let mut kvm_msrs = kvm_msrs::<[kvm_msr_entry; 1]>::new_zeroed();
1048 kvm_msrs.nmsrs = 1;
1049 kvm_msrs.entries[0].index = msr_index;
1050 kvm_msrs.entries[0].data = value;
1051
1052 let ret = {
1053 unsafe { ioctl_with_ref(self, KVM_SET_MSRS, &kvm_msrs) }
1056 };
1057 if ret < 0 {
1058 return errno_result();
1059 }
1060
1061 if ret != 1 {
1063 error!("failed to set MSR {:#x} to {:#x}", msr_index, value);
1064 return Err(base::Error::new(libc::EPERM));
1065 }
1066
1067 Ok(())
1068 }
1069
1070 fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
1071 let cpuid = Box::<kvm_cpuid2<[kvm_cpuid_entry2]>>::from(cpuid);
1072 let ret = {
1073 unsafe { ioctl_with_ref(self, KVM_SET_CPUID2, &*cpuid) }
1076 };
1077 if ret == 0 {
1078 Ok(())
1079 } else {
1080 errno_result()
1081 }
1082 }
1083
1084 fn set_guest_debug(&self, addrs: &[GuestAddress], enable_singlestep: bool) -> Result<()> {
1085 use kvm_sys::*;
1086 let mut dbg: kvm_guest_debug = Default::default();
1087
1088 if addrs.len() > 4 {
1089 error!(
1090 "Support 4 breakpoints at most but {} addresses are passed",
1091 addrs.len()
1092 );
1093 return Err(base::Error::new(libc::EINVAL));
1094 }
1095
1096 dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
1097 if enable_singlestep {
1098 dbg.control |= KVM_GUESTDBG_SINGLESTEP;
1099 }
1100
1101 dbg.arch.debugreg[7] = 0x0600;
1105
1106 for (i, addr) in addrs.iter().enumerate() {
1107 dbg.arch.debugreg[i] = addr.0;
1108 dbg.arch.debugreg[7] |= 2 << (i * 2);
1110 }
1111
1112 let ret = {
1113 unsafe { ioctl_with_ref(self, KVM_SET_GUEST_DEBUG, &dbg) }
1116 };
1117 if ret == 0 {
1118 Ok(())
1119 } else {
1120 errno_result()
1121 }
1122 }
1123
1124 fn handle_cpuid(&mut self, _entry: &CpuIdEntry) -> Result<()> {
1126 Err(Error::new(ENXIO))
1127 }
1128
1129 fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, _tsc_offset: u64) -> Result<()> {
1130 Ok(())
1132 }
1133}
1134
impl KvmVcpu {
    /// Reads the in-kernel local APIC registers via `KVM_GET_LAPIC`.
    pub fn get_lapic(&self) -> Result<kvm_lapic_state> {
        let mut klapic: kvm_lapic_state = Default::default();

        let ret = {
            // SAFETY: we own `klapic` and the ioctl writes only into it.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_LAPIC, &mut klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(klapic)
    }

    /// Writes the in-kernel local APIC registers via `KVM_SET_LAPIC`.
    pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> {
        let ret = {
            // SAFETY: the ioctl only reads the provided struct.
            unsafe { ioctl_with_ref(self, KVM_SET_LAPIC, klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Reads the APIC base (address + enable bits) from IA32_APICBASE.
    pub fn get_apic_base(&self) -> Result<u64> {
        self.get_msr(MSR_IA32_APICBASE)
    }

    /// Writes the APIC base via IA32_APICBASE.
    pub fn set_apic_base(&self, apic_base: u64) -> Result<()> {
        self.set_msr(MSR_IA32_APICBASE, apic_base)
    }

    /// Reads the pending-interrupt bitmap carried in `kvm_sregs`.
    pub fn get_interrupt_bitmap(&self) -> Result<[u64; 4usize]> {
        let mut regs: kvm_sregs = Default::default();
        // SAFETY: we own `regs` and the ioctl writes only into it.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            Ok(regs.interrupt_bitmap)
        } else {
            errno_result()
        }
    }

    /// Writes the pending-interrupt bitmap, preserving all other `kvm_sregs`
    /// fields by reading the current state first.
    pub fn set_interrupt_bitmap(&self, interrupt_bitmap: [u64; 4usize]) -> Result<()> {
        let mut regs: kvm_sregs = Default::default();
        // SAFETY: we own `regs` and the ioctl writes only into it.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            regs.interrupt_bitmap = interrupt_bitmap;
            // SAFETY: the ioctl only reads the provided struct.
            let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &regs) };
            if ret >= 0 {
                Ok(())
            } else {
                errno_result()
            }
        } else {
            errno_result()
        }
    }
}
1230
1231impl<'a> From<&'a kvm_cpuid2<[kvm_cpuid_entry2]>> for CpuId {
1232 fn from(kvm_cpuid: &'a kvm_cpuid2<[kvm_cpuid_entry2]>) -> CpuId {
1233 let kvm_entries = &kvm_cpuid.entries[..kvm_cpuid.nent as usize];
1234 let mut cpu_id_entries = Vec::with_capacity(kvm_entries.len());
1235
1236 for entry in kvm_entries {
1237 let cpu_id_entry = CpuIdEntry {
1238 function: entry.function,
1239 index: entry.index,
1240 flags: entry.flags,
1241 cpuid: CpuidResult {
1242 eax: entry.eax,
1243 ebx: entry.ebx,
1244 ecx: entry.ecx,
1245 edx: entry.edx,
1246 },
1247 };
1248 cpu_id_entries.push(cpu_id_entry)
1249 }
1250 CpuId { cpu_id_entries }
1251 }
1252}
1253
1254impl From<&CpuId> for Box<kvm_cpuid2<[kvm_cpuid_entry2]>> {
1255 fn from(cpuid: &CpuId) -> Box<kvm_cpuid2<[kvm_cpuid_entry2]>> {
1256 let mut kvm =
1257 kvm_cpuid2::<[kvm_cpuid_entry2]>::new_box_zeroed_with_elems(cpuid.cpu_id_entries.len())
1258 .unwrap();
1259 kvm.nent = cpuid.cpu_id_entries.len().try_into().unwrap();
1260 for (i, &e) in cpuid.cpu_id_entries.iter().enumerate() {
1261 kvm.entries[i] = kvm_cpuid_entry2 {
1262 function: e.function,
1263 index: e.index,
1264 flags: e.flags,
1265 eax: e.cpuid.eax,
1266 ebx: e.cpuid.ebx,
1267 ecx: e.cpuid.ecx,
1268 edx: e.cpuid.edx,
1269 ..Default::default()
1270 };
1271 }
1272 kvm
1273 }
1274}
1275
impl From<&ClockState> for kvm_clock_data {
    fn from(state: &ClockState) -> Self {
        // Only the clock value is carried over; flags/padding stay zero.
        kvm_clock_data {
            clock: state.clock,
            ..Default::default()
        }
    }
}
1284
impl From<&kvm_clock_data> for ClockState {
    fn from(clock_data: &kvm_clock_data) -> Self {
        // Flags reported by the kernel are intentionally dropped.
        ClockState {
            clock: clock_data.clock,
        }
    }
}
1292
impl From<&kvm_pic_state> for PicState {
    fn from(item: &kvm_pic_state) -> Self {
        // Field-for-field translation; the kernel encodes booleans as u8.
        PicState {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select != 0,
            poll: item.poll != 0,
            special_mask: item.special_mask != 0,
            init_state: item.init_state.into(),
            auto_eoi: item.auto_eoi != 0,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi != 0,
            special_fully_nested_mode: item.special_fully_nested_mode != 0,
            // Kernel field `init4` maps to our `use_4_byte_icw`.
            use_4_byte_icw: item.init4 != 0,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}
1315
impl From<&PicState> for kvm_pic_state {
    fn from(item: &PicState) -> Self {
        // Inverse of the conversion above; booleans become 0/1 u8 values.
        kvm_pic_state {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select as u8,
            poll: item.poll as u8,
            special_mask: item.special_mask as u8,
            init_state: item.init_state as u8,
            auto_eoi: item.auto_eoi as u8,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi as u8,
            special_fully_nested_mode: item.special_fully_nested_mode as u8,
            // Our `use_4_byte_icw` maps back to the kernel's `init4`.
            init4: item.use_4_byte_icw as u8,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}
1338
impl From<&kvm_ioapic_state> for IoapicState {
    fn from(item: &kvm_ioapic_state) -> Self {
        let mut state = IoapicState {
            base_address: item.base_address,
            ioregsel: item.ioregsel as u8,
            ioapicid: item.id,
            current_interrupt_level_bitmap: item.irr,
            redirect_table: [IoapicRedirectionTableEntry::default(); NUM_IOAPIC_PINS],
        };
        // Translate each redirection-table entry; if the kernel table is
        // longer than NUM_IOAPIC_PINS, the extras are dropped by `zip`.
        for (in_state, out_state) in item.redirtbl.iter().zip(state.redirect_table.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}
1354
impl From<&IoapicRedirectionTableEntry> for kvm_ioapic_state__bindgen_ty_1 {
    fn from(item: &IoapicRedirectionTableEntry) -> Self {
        kvm_ioapic_state__bindgen_ty_1 {
            // Pack the whole 64-bit redirection entry into the union's raw
            // `bits` member.
            bits: item.get(0, 64),
        }
    }
}
1364
impl From<&kvm_ioapic_state__bindgen_ty_1> for IoapicRedirectionTableEntry {
    fn from(item: &kvm_ioapic_state__bindgen_ty_1) -> Self {
        let mut entry = IoapicRedirectionTableEntry::default();
        // SAFETY: every bit pattern of the 64-bit union is a valid `bits`
        // value, so reading this member is always sound.
        entry.set(0, 64, unsafe { item.bits });
        entry
    }
}
1375
impl From<&IoapicState> for kvm_ioapic_state {
    fn from(item: &IoapicState) -> Self {
        let mut state = kvm_ioapic_state {
            base_address: item.base_address,
            ioregsel: item.ioregsel as u32,
            id: item.ioapicid,
            irr: item.current_interrupt_level_bitmap,
            ..Default::default()
        };
        // Translate each redirection-table entry; `zip` bounds the copy to
        // the shorter of the two tables.
        for (in_state, out_state) in item.redirect_table.iter().zip(state.redirtbl.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}
1391
1392impl From<&LapicState> for kvm_lapic_state {
1393 fn from(item: &LapicState) -> Self {
1394 let mut state = kvm_lapic_state::default();
1395 for (reg, value) in item.regs.iter().enumerate() {
1397 let reg_offset = 16 * reg;
1399 let regs_slice = &mut state.regs[reg_offset..reg_offset + 4];
1400
1401 for (i, v) in value.to_le_bytes().iter().enumerate() {
1404 regs_slice[i] = *v as i8;
1405 }
1406 }
1407 state
1408 }
1409}
1410
1411impl From<&kvm_lapic_state> for LapicState {
1412 fn from(item: &kvm_lapic_state) -> Self {
1413 let mut state = LapicState { regs: [0; 64] };
1414 for reg in 0..64 {
1416 let reg_offset = 16 * reg;
1418
1419 let reg_slice = &item.regs[reg_offset..reg_offset + 4];
1421 let mut bytes = [0u8; 4];
1422 for i in 0..4 {
1423 bytes[i] = reg_slice[i] as u8;
1424 }
1425 state.regs[reg] = u32::from_le_bytes(bytes);
1426 }
1427 state
1428 }
1429}
1430
impl From<&PitState> for kvm_pit_state2 {
    fn from(item: &PitState) -> Self {
        // The PIT always has exactly three channels; reserved padding in the
        // kernel struct stays at its default (zero).
        kvm_pit_state2 {
            channels: [
                kvm_pit_channel_state::from(&item.channels[0]),
                kvm_pit_channel_state::from(&item.channels[1]),
                kvm_pit_channel_state::from(&item.channels[2]),
            ],
            flags: item.flags,
            ..Default::default()
        }
    }
}
1444
impl From<&kvm_pit_state2> for PitState {
    fn from(item: &kvm_pit_state2) -> Self {
        PitState {
            channels: [
                PitChannelState::from(&item.channels[0]),
                PitChannelState::from(&item.channels[1]),
                PitChannelState::from(&item.channels[2]),
            ],
            flags: item.flags,
        }
    }
}
1457
impl From<&PitChannelState> for kvm_pit_channel_state {
    fn from(item: &PitChannelState) -> Self {
        kvm_pit_channel_state {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched as u8,
            status_latched: item.status_latched as u8,
            status: item.status,
            read_state: item.read_state as u8,
            write_state: item.write_state as u8,
            // Our `reload_value` maps to the kernel's `write_latch`; the
            // u16 → u8 cast keeps only the low byte, matching the inverse
            // conversion below.
            write_latch: item.reload_value as u8,
            rw_mode: item.rw_mode as u8,
            mode: item.mode,
            bcd: item.bcd as u8,
            gate: item.gate as u8,
            count_load_time: item.count_load_time as i64,
        }
    }
}
1478
impl From<&kvm_pit_channel_state> for PitChannelState {
    fn from(item: &kvm_pit_channel_state) -> Self {
        PitChannelState {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched.into(),
            status_latched: item.status_latched != 0,
            status: item.status,
            read_state: item.read_state.into(),
            write_state: item.write_state.into(),
            // The kernel's `write_latch` maps to our `reload_value`.
            reload_value: item.write_latch as u16,
            rw_mode: item.rw_mode.into(),
            mode: item.mode,
            bcd: item.bcd != 0,
            gate: item.gate != 0,
            count_load_time: item.count_load_time as u64,
        }
    }
}
1499
/// Maps a hypervisor-agnostic [`IrqSourceChip`] to the KVM irqchip id used
/// in IRQ routing entries. Chips that do not exist on x86 log an error and
/// fall back to 0.
pub(super) fn chip_to_kvm_chip(chip: IrqSourceChip) -> u32 {
    match chip {
        IrqSourceChip::PicPrimary => KVM_IRQCHIP_PIC_MASTER,
        IrqSourceChip::PicSecondary => KVM_IRQCHIP_PIC_SLAVE,
        IrqSourceChip::Ioapic => KVM_IRQCHIP_IOAPIC,
        _ => {
            error!("Invalid IrqChipSource for X86 {:?}", chip);
            0
        }
    }
}
1514
impl From<&kvm_regs> for Regs {
    /// Mirrors KVM's general-purpose register block (KVM_GET_REGS) into the
    /// arch-neutral `Regs`: the 16 GPRs plus rip and rflags, copied 1:1.
    fn from(r: &kvm_regs) -> Self {
        Regs {
            rax: r.rax,
            rbx: r.rbx,
            rcx: r.rcx,
            rdx: r.rdx,
            rsi: r.rsi,
            rdi: r.rdi,
            rsp: r.rsp,
            rbp: r.rbp,
            r8: r.r8,
            r9: r.r9,
            r10: r.r10,
            r11: r.r11,
            r12: r.r12,
            r13: r.r13,
            r14: r.r14,
            r15: r.r15,
            rip: r.rip,
            rflags: r.rflags,
        }
    }
}
1539
impl From<&Regs> for kvm_regs {
    /// Mirrors the arch-neutral `Regs` back into KVM's struct for
    /// KVM_SET_REGS; the exact inverse of the conversion above.
    fn from(r: &Regs) -> Self {
        kvm_regs {
            rax: r.rax,
            rbx: r.rbx,
            rcx: r.rcx,
            rdx: r.rdx,
            rsi: r.rsi,
            rdi: r.rdi,
            rsp: r.rsp,
            rbp: r.rbp,
            r8: r.r8,
            r9: r.r9,
            r10: r.r10,
            r11: r.r11,
            r12: r.r12,
            r13: r.r13,
            r14: r.r14,
            r15: r.r15,
            rip: r.rip,
            rflags: r.rflags,
        }
    }
}
1564
impl From<&VcpuEvents> for kvm_vcpu_events {
    /// Builds the struct passed to KVM_SET_VCPU_EVENTS from saved vCPU event
    /// state. Each `Option` field, when present, sets its matching
    /// KVM_VCPUEVENT_VALID_* flag bit so KVM knows that part of the struct
    /// carries meaningful data.
    fn from(ve: &VcpuEvents) -> Self {
        let mut kvm_ve: kvm_vcpu_events = Default::default();

        kvm_ve.exception.injected = ve.exception.injected as u8;
        kvm_ve.exception.nr = ve.exception.nr;
        kvm_ve.exception.has_error_code = ve.exception.has_error_code as u8;
        // exception.pending and the exception payload are gated together by
        // KVM_VCPUEVENT_VALID_PAYLOAD; the reverse conversion captures both
        // only when that same flag is reported.
        if let Some(pending) = ve.exception.pending {
            kvm_ve.exception.pending = pending as u8;
            if ve.exception_payload.is_some() {
                kvm_ve.exception_has_payload = true as u8;
            }
            kvm_ve.exception_payload = ve.exception_payload.unwrap_or(0);
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        }
        kvm_ve.exception.error_code = ve.exception.error_code;

        kvm_ve.interrupt.injected = ve.interrupt.injected as u8;
        kvm_ve.interrupt.nr = ve.interrupt.nr;
        kvm_ve.interrupt.soft = ve.interrupt.soft as u8;
        // The interrupt shadow is only restored when it was captured.
        if let Some(shadow) = ve.interrupt.shadow {
            kvm_ve.interrupt.shadow = shadow;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SHADOW;
        }

        kvm_ve.nmi.injected = ve.nmi.injected as u8;
        if let Some(pending) = ve.nmi.pending {
            kvm_ve.nmi.pending = pending as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        }
        kvm_ve.nmi.masked = ve.nmi.masked as u8;

        if let Some(sipi_vector) = ve.sipi_vector {
            kvm_ve.sipi_vector = sipi_vector;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }

        // Only smi.smm is optional; the remaining SMI fields are always set.
        if let Some(smm) = ve.smi.smm {
            kvm_ve.smi.smm = smm as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
        kvm_ve.smi.pending = ve.smi.pending as u8;
        kvm_ve.smi.smm_inside_nmi = ve.smi.smm_inside_nmi as u8;
        kvm_ve.smi.latched_init = ve.smi.latched_init;

        if let Some(pending) = ve.triple_fault.pending {
            kvm_ve.triple_fault.pending = pending as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
        }
        kvm_ve
    }
}
1617
1618impl From<&kvm_vcpu_events> for VcpuEvents {
1619 fn from(ve: &kvm_vcpu_events) -> Self {
1620 let exception = VcpuExceptionState {
1621 injected: ve.exception.injected != 0,
1622 nr: ve.exception.nr,
1623 has_error_code: ve.exception.has_error_code != 0,
1624 pending: if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
1625 Some(ve.exception.pending != 0)
1626 } else {
1627 None
1628 },
1629 error_code: ve.exception.error_code,
1630 };
1631
1632 let interrupt = VcpuInterruptState {
1633 injected: ve.interrupt.injected != 0,
1634 nr: ve.interrupt.nr,
1635 soft: ve.interrupt.soft != 0,
1636 shadow: if ve.flags & KVM_VCPUEVENT_VALID_SHADOW != 0 {
1637 Some(ve.interrupt.shadow)
1638 } else {
1639 None
1640 },
1641 };
1642
1643 let nmi = VcpuNmiState {
1644 injected: ve.interrupt.injected != 0,
1645 pending: if ve.flags & KVM_VCPUEVENT_VALID_NMI_PENDING != 0 {
1646 Some(ve.nmi.pending != 0)
1647 } else {
1648 None
1649 },
1650 masked: ve.nmi.masked != 0,
1651 };
1652
1653 let sipi_vector = if ve.flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR != 0 {
1654 Some(ve.sipi_vector)
1655 } else {
1656 None
1657 };
1658
1659 let smi = VcpuSmiState {
1660 smm: if ve.flags & KVM_VCPUEVENT_VALID_SMM != 0 {
1661 Some(ve.smi.smm != 0)
1662 } else {
1663 None
1664 },
1665 pending: ve.smi.pending != 0,
1666 smm_inside_nmi: ve.smi.smm_inside_nmi != 0,
1667 latched_init: ve.smi.latched_init,
1668 };
1669
1670 let triple_fault = VcpuTripleFaultState {
1671 pending: if ve.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT != 0 {
1672 Some(ve.triple_fault.pending != 0)
1673 } else {
1674 None
1675 },
1676 };
1677
1678 let exception_payload = if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
1679 Some(ve.exception_payload)
1680 } else {
1681 None
1682 };
1683
1684 VcpuEvents {
1685 exception,
1686 interrupt,
1687 nmi,
1688 sipi_vector,
1689 smi,
1690 triple_fault,
1691 exception_payload,
1692 }
1693 }
1694}
1695
impl From<&kvm_segment> for Segment {
    /// Converts KVM's segment descriptor into the arch-neutral `Segment`.
    /// KVM's `limit` maps to `limit_bytes`; KVM's `unusable` bit has no
    /// counterpart here and is dropped (it is re-derived from `present` in
    /// the reverse conversion).
    fn from(s: &kvm_segment) -> Self {
        Segment {
            base: s.base,
            limit_bytes: s.limit,
            selector: s.selector,
            type_: s.type_,
            present: s.present,
            dpl: s.dpl,
            db: s.db,
            s: s.s,
            l: s.l,
            g: s.g,
            avl: s.avl,
        }
    }
}
1713
1714impl From<&Segment> for kvm_segment {
1715 fn from(s: &Segment) -> Self {
1716 kvm_segment {
1717 base: s.base,
1718 limit: s.limit_bytes,
1719 selector: s.selector,
1720 type_: s.type_,
1721 present: s.present,
1722 dpl: s.dpl,
1723 db: s.db,
1724 s: s.s,
1725 l: s.l,
1726 g: s.g,
1727 avl: s.avl,
1728 unusable: match s.present {
1729 0 => 1,
1730 _ => 0,
1731 },
1732 ..Default::default()
1733 }
1734 }
1735}
1736
1737impl From<&kvm_dtable> for DescriptorTable {
1738 fn from(dt: &kvm_dtable) -> Self {
1739 DescriptorTable {
1740 base: dt.base,
1741 limit: dt.limit,
1742 }
1743 }
1744}
1745
1746impl From<&DescriptorTable> for kvm_dtable {
1747 fn from(dt: &DescriptorTable) -> Self {
1748 kvm_dtable {
1749 base: dt.base,
1750 limit: dt.limit,
1751 ..Default::default()
1752 }
1753 }
1754}
1755
1756impl From<&kvm_sregs> for Sregs {
1757 fn from(r: &kvm_sregs) -> Self {
1758 Sregs {
1759 cs: Segment::from(&r.cs),
1760 ds: Segment::from(&r.ds),
1761 es: Segment::from(&r.es),
1762 fs: Segment::from(&r.fs),
1763 gs: Segment::from(&r.gs),
1764 ss: Segment::from(&r.ss),
1765 tr: Segment::from(&r.tr),
1766 ldt: Segment::from(&r.ldt),
1767 gdt: DescriptorTable::from(&r.gdt),
1768 idt: DescriptorTable::from(&r.idt),
1769 cr0: r.cr0,
1770 cr2: r.cr2,
1771 cr3: r.cr3,
1772 cr4: r.cr4,
1773 cr8: r.cr8,
1774 efer: r.efer,
1775 }
1776 }
1777}
1778
impl From<&kvm_fpu> for Fpu {
    /// Converts KVM's FPU state into the arch-neutral `Fpu`.
    fn from(r: &kvm_fpu) -> Self {
        Fpu {
            // KVM exchanges the x87 registers as 16-byte arrays.
            fpr: FpuReg::from_16byte_arrays(&r.fpr),
            fcw: r.fcw,
            fsw: r.fsw,
            ftwx: r.ftwx,
            last_opcode: r.last_opcode,
            last_ip: r.last_ip,
            last_dp: r.last_dp,
            xmm: r.xmm,
            mxcsr: r.mxcsr,
        }
    }
}
1794
impl From<&Fpu> for kvm_fpu {
    /// Converts the arch-neutral `Fpu` back into KVM's struct for
    /// KVM_SET_FPU; any remaining fields are left at their default (zero)
    /// values.
    fn from(r: &Fpu) -> Self {
        kvm_fpu {
            // KVM expects the x87 registers as 16-byte arrays.
            fpr: FpuReg::to_16byte_arrays(&r.fpr),
            fcw: r.fcw,
            fsw: r.fsw,
            ftwx: r.ftwx,
            last_opcode: r.last_opcode,
            last_ip: r.last_ip,
            last_dp: r.last_dp,
            xmm: r.xmm,
            mxcsr: r.mxcsr,
            ..Default::default()
        }
    }
}
1811
1812impl From<&kvm_debugregs> for DebugRegs {
1813 fn from(r: &kvm_debugregs) -> Self {
1814 DebugRegs {
1815 db: r.db,
1816 dr6: r.dr6,
1817 dr7: r.dr7,
1818 }
1819 }
1820}
1821
impl From<&DebugRegs> for kvm_debugregs {
    /// Converts the arch-neutral debug registers into KVM's struct; any
    /// remaining fields are left at their default (zero) values.
    fn from(r: &DebugRegs) -> Self {
        kvm_debugregs {
            db: r.db,
            dr6: r.dr6,
            dr7: r.dr7,
            ..Default::default()
        }
    }
}
1832
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn vcpu_event_to_from() {
        // Populate a kvm_vcpu_events with distinctive values, and set every
        // KVM_VCPUEVENT_VALID_* flag that gates optional state except
        // VALID_TRIPLE_FAULT, so the `None` path is exercised too.
        // NOTE: interrupt.injected and nmi.injected are both 1 here, so the
        // test cannot tell those two fields apart.
        let mut kvm_ve: kvm_vcpu_events = Default::default();
        kvm_ve.exception.injected = 1;
        kvm_ve.exception.nr = 65;
        kvm_ve.exception.has_error_code = 1;
        kvm_ve.exception.error_code = 110;
        kvm_ve.exception.pending = 1;

        kvm_ve.interrupt.injected = 1;
        kvm_ve.interrupt.nr = 100;
        kvm_ve.interrupt.soft = 1;
        kvm_ve.interrupt.shadow = 114;

        kvm_ve.nmi.injected = 1;
        kvm_ve.nmi.pending = 1;
        kvm_ve.nmi.masked = 0;

        kvm_ve.sipi_vector = 105;

        kvm_ve.smi.smm = 1;
        kvm_ve.smi.pending = 1;
        kvm_ve.smi.smm_inside_nmi = 1;
        kvm_ve.smi.latched_init = 100;

        kvm_ve.triple_fault.pending = 0;

        kvm_ve.exception_payload = 33;
        kvm_ve.exception_has_payload = 1;

        kvm_ve.flags = 0
            | KVM_VCPUEVENT_VALID_PAYLOAD
            | KVM_VCPUEVENT_VALID_SMM
            | KVM_VCPUEVENT_VALID_NMI_PENDING
            | KVM_VCPUEVENT_VALID_SIPI_VECTOR
            | KVM_VCPUEVENT_VALID_SHADOW;

        // kvm_vcpu_events -> VcpuEvents.
        let ve: VcpuEvents = VcpuEvents::from(&kvm_ve);
        assert!(ve.exception.injected);
        assert_eq!(ve.exception.nr, 65);
        assert!(ve.exception.has_error_code);
        assert_eq!(ve.exception.error_code, 110);
        assert_eq!(ve.exception.pending, Some(true));

        assert!(ve.interrupt.injected);
        assert_eq!(ve.interrupt.nr, 100);
        assert!(ve.interrupt.soft);
        assert_eq!(ve.interrupt.shadow, Some(114));

        assert!(ve.nmi.injected);
        assert_eq!(ve.nmi.pending, Some(true));
        assert!(!ve.nmi.masked);

        assert_eq!(ve.sipi_vector, Some(105));

        assert_eq!(ve.smi.smm, Some(true));
        assert!(ve.smi.pending);
        assert!(ve.smi.smm_inside_nmi);
        assert_eq!(ve.smi.latched_init, 100);

        // VALID_TRIPLE_FAULT was not set, so the value must not be captured.
        assert_eq!(ve.triple_fault.pending, None);

        assert_eq!(ve.exception_payload, Some(33));

        // VcpuEvents -> kvm_vcpu_events must round-trip the same values.
        let kvm_ve_restored: kvm_vcpu_events = kvm_vcpu_events::from(&ve);
        assert_eq!(kvm_ve_restored.exception.injected, 1);
        assert_eq!(kvm_ve_restored.exception.nr, 65);
        assert_eq!(kvm_ve_restored.exception.has_error_code, 1);
        assert_eq!(kvm_ve_restored.exception.error_code, 110);
        assert_eq!(kvm_ve_restored.exception.pending, 1);

        assert_eq!(kvm_ve_restored.interrupt.injected, 1);
        assert_eq!(kvm_ve_restored.interrupt.nr, 100);
        assert_eq!(kvm_ve_restored.interrupt.soft, 1);
        assert_eq!(kvm_ve_restored.interrupt.shadow, 114);

        assert_eq!(kvm_ve_restored.nmi.injected, 1);
        assert_eq!(kvm_ve_restored.nmi.pending, 1);
        assert_eq!(kvm_ve_restored.nmi.masked, 0);

        assert_eq!(kvm_ve_restored.sipi_vector, 105);

        assert_eq!(kvm_ve_restored.smi.smm, 1);
        assert_eq!(kvm_ve_restored.smi.pending, 1);
        assert_eq!(kvm_ve_restored.smi.smm_inside_nmi, 1);
        assert_eq!(kvm_ve_restored.smi.latched_init, 100);

        assert_eq!(kvm_ve_restored.triple_fault.pending, 0);

        assert_eq!(kvm_ve_restored.exception_payload, 33);
        assert_eq!(kvm_ve_restored.exception_has_payload, 1);
    }
}