1use std::arch::x86_64::CpuidResult;
6use std::collections::BTreeMap;
7
8use base::errno_result;
9use base::error;
10use base::ioctl;
11use base::ioctl_with_mut_ptr;
12use base::ioctl_with_mut_ref;
13use base::ioctl_with_ptr;
14use base::ioctl_with_ref;
15use base::ioctl_with_val;
16use base::AsRawDescriptor;
17use base::Error;
18use base::IoctlNr;
19use base::MappedRegion;
20use base::Result;
21use data_model::vec_with_array_field;
22use data_model::FlexibleArrayWrapper;
23use kvm_sys::*;
24use libc::E2BIG;
25use libc::EAGAIN;
26use libc::EINVAL;
27use libc::EIO;
28use libc::ENOMEM;
29use libc::ENXIO;
30use serde::Deserialize;
31use serde::Serialize;
32use snapshot::AnySnapshot;
33use vm_memory::GuestAddress;
34
35use super::Config;
36use super::Kvm;
37use super::KvmCap;
38use super::KvmVcpu;
39use super::KvmVm;
40use crate::host_phys_addr_bits;
41use crate::ClockState;
42use crate::CpuId;
43use crate::CpuIdEntry;
44use crate::DebugRegs;
45use crate::DescriptorTable;
46use crate::DeviceKind;
47use crate::Fpu;
48use crate::FpuReg;
49use crate::HypervisorX86_64;
50use crate::IoapicRedirectionTableEntry;
51use crate::IoapicState;
52use crate::IrqSourceChip;
53use crate::LapicState;
54use crate::PicSelect;
55use crate::PicState;
56use crate::PitChannelState;
57use crate::PitState;
58use crate::ProtectionType;
59use crate::Regs;
60use crate::Segment;
61use crate::Sregs;
62use crate::VcpuExit;
63use crate::VcpuX86_64;
64use crate::VmCap;
65use crate::VmX86_64;
66use crate::Xsave;
67use crate::NUM_IOAPIC_PINS;
68
/// Flexible-array wrapper pairing a `kvm_cpuid2` header with its trailing
/// `kvm_cpuid_entry2` array, as required by the CPUID-related ioctls.
type KvmCpuId = FlexibleArrayWrapper<kvm_cpuid2, kvm_cpuid_entry2>;
// Size in bytes of the legacy (pre-KVM_CAP_XSAVE2) XSAVE area buffer.
const KVM_XSAVE_MAX_SIZE: usize = 4096;
// MSR number of the local APIC base register (IA32_APICBASE).
const MSR_IA32_APICBASE: u32 = 0x0000001b;
72
/// Serializable mirror of KVM's `kvm_vcpu_events`: pending exception,
/// interrupt, NMI, SMI and triple-fault state captured for vCPU snapshots.
///
/// `Option` fields correspond to data guarded by `KVM_VCPUEVENT_VALID_*`
/// flags in the C struct — presumably `None` when the kernel did not report
/// them; confirm against the `kvm_vcpu_events` conversion code.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuEvents {
    pub exception: VcpuExceptionState,
    pub interrupt: VcpuInterruptState,
    pub nmi: VcpuNmiState,
    pub sipi_vector: Option<u32>,
    pub smi: VcpuSmiState,
    pub triple_fault: VcpuTripleFaultState,
    pub exception_payload: Option<u64>,
}
83
/// Pending-exception portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuExceptionState {
    // True when an exception has been injected but not yet delivered.
    pub injected: bool,
    // Exception vector number.
    pub nr: u8,
    // Whether `error_code` below is meaningful for this exception.
    pub has_error_code: bool,
    // Pending (queued, not yet injected) flag; `None` when not reported.
    pub pending: Option<bool>,
    pub error_code: u32,
}
92
/// Pending-interrupt portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuInterruptState {
    // True when an interrupt has been injected but not yet delivered.
    pub injected: bool,
    // Interrupt vector number.
    pub nr: u8,
    // True for soft (INT n) interrupts.
    pub soft: bool,
    // Interrupt shadow state; `None` when not reported by the kernel.
    pub shadow: Option<u8>,
}
100
/// NMI portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuNmiState {
    pub injected: bool,
    // Pending flag; `None` when not reported by the kernel.
    pub pending: Option<bool>,
    // True while NMIs are masked (e.g. inside an NMI handler).
    pub masked: bool,
}
107
/// SMI/SMM portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuSmiState {
    // True while the vCPU is in System Management Mode; `None` when unknown.
    pub smm: Option<bool>,
    pub pending: bool,
    // True when SMM was entered from within an NMI handler.
    pub smm_inside_nmi: bool,
    pub latched_init: u8,
}
115
/// Triple-fault portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuTripleFaultState {
    // Pending triple fault; `None` when the kernel did not report the field.
    pub pending: Option<bool>,
}
120
/// Issues a CPUID-style ioctl (`kind`) against `descriptor`, growing the
/// entry buffer until the kernel accepts it.
///
/// Starts with `initial_capacity` entries; on `E2BIG` the capacity is doubled
/// and the ioctl retried. Any other errno — or overflow while doubling — is
/// returned to the caller.
pub fn get_cpuid_with_initial_capacity<T: AsRawDescriptor>(
    descriptor: &T,
    kind: IoctlNr,
    initial_capacity: usize,
) -> Result<CpuId> {
    let mut entries: usize = initial_capacity;

    loop {
        let mut kvm_cpuid = KvmCpuId::new(entries);

        let ret = {
            // SAFETY: kvm_cpuid was allocated with `entries` trailing entries
            // (the wrapper presumably records that count in the header), so
            // the kernel writes only within the buffer.
            unsafe { ioctl_with_mut_ptr(descriptor, kind, kvm_cpuid.as_mut_ptr()) }
        };
        if ret < 0 {
            let err = Error::last();
            match err.errno() {
                E2BIG => {
                    // Buffer too small: double the capacity and retry.
                    if let Some(val) = entries.checked_mul(2) {
                        entries = val;
                    } else {
                        return Err(err);
                    }
                }
                _ => return Err(err),
            }
        } else {
            return Ok(CpuId::from(&kvm_cpuid));
        }
    }
}
156
157impl Kvm {
158 pub fn get_cpuid(&self, kind: IoctlNr) -> Result<CpuId> {
159 const KVM_MAX_ENTRIES: usize = 256;
160 get_cpuid_with_initial_capacity(self, kind, KVM_MAX_ENTRIES)
161 }
162
163 pub fn get_vm_type(&self, protection_type: ProtectionType) -> Result<u32> {
164 if protection_type.isolates_memory() {
165 Ok(KVM_X86_PKVM_PROTECTED_VM)
166 } else {
167 Ok(KVM_X86_DEFAULT_VM)
168 }
169 }
170
171 pub fn get_guest_phys_addr_bits(&self) -> u8 {
173 host_phys_addr_bits()
175 }
176}
177
impl HypervisorX86_64 for Kvm {
    /// Returns the CPUID feature set KVM can expose to guests.
    fn get_supported_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_SUPPORTED_CPUID)
    }

    /// Returns the list of MSR indices the host kernel supports get/set on.
    fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        const MAX_KVM_MSR_ENTRIES: usize = 256;

        let mut msr_list = vec_with_array_field::<kvm_msr_list, u32>(MAX_KVM_MSR_ENTRIES);
        msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32;

        let ret = {
            // SAFETY: msr_list was allocated with room for MAX_KVM_MSR_ENTRIES
            // trailing indices and `nmsrs` advertises exactly that capacity.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST, &mut msr_list[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        let mut nmsrs = msr_list[0].nmsrs;

        // SAFETY: we only read up to `nmsrs` entries, additionally clamped to
        // our own allocation in case the kernel reports a larger count.
        let indices: &[u32] = unsafe {
            if nmsrs > MAX_KVM_MSR_ENTRIES as u32 {
                nmsrs = MAX_KVM_MSR_ENTRIES as u32;
            }
            msr_list[0].indices.as_slice(nmsrs as usize)
        };

        Ok(indices.to_vec())
    }
}
215
impl KvmVm {
    /// Performs x86-specific VM setup; KVM needs no extra work here.
    pub fn init_arch(&self, _cfg: &Config) -> Result<()> {
        Ok(())
    }

    /// Answers x86-specific `VmCap` queries; `None` defers to common code.
    pub fn check_capability_arch(&self, c: VmCap) -> Option<bool> {
        match c {
            // A paravirtual clock is always available on x86 KVM.
            VmCap::PvClock => Some(true),
            _ => None,
        }
    }

    /// No device kind needs special creation parameters on x86.
    pub fn get_device_params_arch(&self, _kind: DeviceKind) -> Option<kvm_create_device> {
        None
    }

    /// Reads the VM's paravirtual clock (KVM_GET_CLOCK).
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        let mut clock_data: kvm_clock_data = Default::default();
        // SAFETY: the kernel only writes into the struct we pass by mut ref.
        let ret =
            unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK, &mut clock_data) };
        if ret == 0 {
            Ok(ClockState::from(&clock_data))
        } else {
            errno_result()
        }
    }

    /// Writes the VM's paravirtual clock (KVM_SET_CLOCK).
    pub fn set_pvclock_arch(&self, state: &ClockState) -> Result<()> {
        let clock_data = kvm_clock_data::from(state);
        // SAFETY: the kernel only reads the struct we pass by reference.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK, &clock_data) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Fetches the state of one in-kernel PIC (KVM_GET_IRQCHIP).
    pub fn get_pic_state(&self, id: PicSelect) -> Result<kvm_pic_state> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        let ret = {
            // SAFETY: the kernel only writes into the kvm_irqchip we provide.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY: `chip_id` selected a PIC, so the kernel populated
                // the `pic` variant of the union.
                unsafe { irqchip_state.chip.pic },
            )
        } else {
            errno_result()
        }
    }

    /// Replaces the state of one in-kernel PIC (KVM_SET_IRQCHIP).
    pub fn set_pic_state(&self, id: PicSelect, state: &kvm_pic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        irqchip_state.chip.pic = *state;
        // SAFETY: the kernel only reads the struct we pass by reference.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// The in-kernel IOAPIC has the fixed legacy pin count.
    pub fn get_ioapic_num_pins(&self) -> Result<usize> {
        Ok(NUM_IOAPIC_PINS)
    }

    /// Fetches the in-kernel IOAPIC state (KVM_GET_IRQCHIP, chip_id 2).
    pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> {
        // chip_id 2 selects the IOAPIC (KVM_IRQCHIP_IOAPIC).
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        let ret = {
            // SAFETY: the kernel only writes into the kvm_irqchip we provide.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY: `chip_id` selected the IOAPIC, so the kernel
                // populated the `ioapic` variant of the union.
                unsafe { irqchip_state.chip.ioapic },
            )
        } else {
            errno_result()
        }
    }

    /// Replaces the in-kernel IOAPIC state (KVM_SET_IRQCHIP, chip_id 2).
    pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        irqchip_state.chip.ioapic = *state;
        // SAFETY: the kernel only reads the struct we pass by reference.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates the in-kernel i8254 PIT with default config (KVM_CREATE_PIT2).
    pub fn create_pit(&self) -> Result<()> {
        let pit_config = kvm_pit_config::default();
        // SAFETY: the kernel only reads the config struct we pass by ref.
        let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2, &pit_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the in-kernel PIT state (KVM_GET_PIT2).
    pub fn get_pit_state(&self) -> Result<kvm_pit_state2> {
        let mut pit_state = Default::default();
        // SAFETY: the kernel only writes into the struct we provide.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2, &mut pit_state) };
        if ret == 0 {
            Ok(pit_state)
        } else {
            errno_result()
        }
    }

    /// Writes the in-kernel PIT state (KVM_SET_PIT2).
    pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> {
        // SAFETY: the kernel only reads the struct we pass by reference.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2, pit_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Toggles guest read access to the platform-info MSR via
    /// KVM_CAP_MSR_PLATFORM_INFO.
    pub fn set_platform_info_read_access(&self, allow_read: bool) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_MSR_PLATFORM_INFO,
            ..Default::default()
        };
        cap.args[0] = allow_read as u64;

        // SAFETY: the kernel only reads the capability struct we pass.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Enables split-irqchip mode (userspace IOAPIC/PIC, in-kernel LAPIC)
    /// with the given number of IOAPIC pins.
    pub fn enable_split_irqchip(&self, ioapic_pins: usize) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_SPLIT_IRQCHIP,
            ..Default::default()
        };
        cap.args[0] = ioapic_pins as u64;
        // SAFETY: the kernel only reads the capability struct we pass.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Queries protected-VM info (currently the firmware size) from the
    /// hypervisor.
    fn get_protected_vm_info(&self) -> Result<KvmProtectedVmInfo> {
        let mut info = KvmProtectedVmInfo {
            firmware_size: 0,
            reserved: [0; 7],
        };
        // SAFETY: `info` outlives the call; args[0] carries its address and
        // the capability writes only into that struct.
        unsafe {
            self.enable_raw_capability(
                KvmCap::X86ProtectedVm,
                KVM_CAP_X86_PROTECTED_VM_FLAGS_INFO,
                &[&mut info as *mut KvmProtectedVmInfo as u64, 0, 0, 0],
            )
        }?;
        Ok(info)
    }

    /// Tells the hypervisor the guest-physical address where the protected-VM
    /// firmware will live.
    fn set_protected_vm_firmware_gpa(&self, fw_addr: GuestAddress) -> Result<()> {
        // SAFETY: only plain values are passed in the args array; no other
        // memory is shared with the kernel.
        unsafe {
            self.enable_raw_capability(
                KvmCap::X86ProtectedVm,
                KVM_CAP_X86_PROTECTED_VM_FLAGS_SET_FW_GPA,
                &[fw_addr.0, 0, 0, 0],
            )
        }
    }
}
484
/// C-layout mirror of the protected-VM info struct filled in by the
/// KVM_CAP_X86_PROTECTED_VM_FLAGS_INFO capability query.
#[repr(C)]
struct KvmProtectedVmInfo {
    // Size in bytes of the protected-VM firmware.
    firmware_size: u64,
    reserved: [u64; 7],
}
490
491impl VmX86_64 for KvmVm {
492 fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
493 &self.kvm
494 }
495
496 fn load_protected_vm_firmware(
497 &mut self,
498 fw_addr: GuestAddress,
499 fw_max_size: u64,
500 ) -> Result<()> {
501 let info = self.get_protected_vm_info()?;
502 if info.firmware_size == 0 {
503 Err(Error::new(EINVAL))
504 } else {
505 if info.firmware_size > fw_max_size {
506 return Err(Error::new(ENOMEM));
507 }
508 self.set_protected_vm_firmware_gpa(fw_addr)
509 }
510 }
511
512 fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
513 Ok(Box::new(KvmVm::create_kvm_vcpu(self, id)?))
516 }
517
518 fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> {
522 let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR, addr.offset()) };
525 if ret == 0 {
526 Ok(())
527 } else {
528 errno_result()
529 }
530 }
531
532 fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> {
536 let ret = unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR, &addr.offset()) };
539 if ret == 0 {
540 Ok(())
541 } else {
542 errno_result()
543 }
544 }
545}
546
impl KvmVcpu {
    /// Handles a system-event reset exit by simply surfacing it to the caller.
    pub fn system_event_reset(&self, _event_flags: u64) -> Result<VcpuExit> {
        Ok(VcpuExit::SystemEventReset)
    }

    /// Returns the XSAVE area size in bytes for this VM.
    ///
    /// KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) reports the required buffer size;
    /// the result is clamped up to the legacy 4096-byte area for hosts that
    /// report less (e.g. no XSAVE2 support).
    fn xsave_size(&self) -> Result<usize> {
        let size = {
            // SAFETY: KVM_CHECK_EXTENSION takes a plain value; no caller
            // memory is shared with the kernel.
            unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2 as u64) }
        };
        if size < 0 {
            return errno_result();
        }
        // `size >= 0` was just checked, so the conversion cannot fail.
        let size: usize = size.try_into().unwrap();
        Ok(size.max(KVM_XSAVE_MAX_SIZE))
    }

    /// Translates x86-specific KVM exit reasons into `VcpuExit` values;
    /// returns `None` for reasons the common code handles.
    #[inline]
    pub(crate) fn handle_vm_exit_arch(&self, run: &mut kvm_run) -> Option<VcpuExit> {
        match run.exit_reason {
            KVM_EXIT_IO => Some(VcpuExit::Io),
            KVM_EXIT_IOAPIC_EOI => {
                // SAFETY: the exit reason guarantees the kernel populated the
                // `eoi` variant of the exit-data union.
                let vector = unsafe { run.__bindgen_anon_1.eoi.vector };
                Some(VcpuExit::IoapicEoi { vector })
            }
            KVM_EXIT_HLT => Some(VcpuExit::Hlt),
            KVM_EXIT_SET_TPR => Some(VcpuExit::SetTpr),
            KVM_EXIT_TPR_ACCESS => Some(VcpuExit::TprAccess),
            KVM_EXIT_X86_BUS_LOCK => Some(VcpuExit::BusLock),
            _ => None,
        }
    }
}
593
/// Snapshot payload for KVM-specific vCPU state: pending events plus the raw
/// nested-virtualization state blob (empty when unsupported).
#[derive(Debug, Serialize, Deserialize)]
struct HypervisorState {
    interrupts: VcpuEvents,
    // Raw kvm_nested_state bytes; empty if KVM_CAP_NESTED_STATE is absent.
    nested_state: Vec<u8>,
}
599
600impl VcpuX86_64 for KvmVcpu {
    #[allow(clippy::cast_ptr_alignment)]
    fn set_interrupt_window_requested(&self, requested: bool) {
        // SAFETY: run_mmap is this vCPU's shared kvm_run mapping and outlives
        // this reference (assumes the mapping covers a full kvm_run — confirm
        // at the mapping's creation site).
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
        run.request_interrupt_window = requested.into();
    }
610
    #[allow(clippy::cast_ptr_alignment)]
    fn ready_for_interrupt(&self) -> bool {
        // SAFETY: run_mmap is this vCPU's shared kvm_run mapping and outlives
        // this reference.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
        // Injectable only when KVM says so AND the guest interrupt flag is set.
        run.ready_for_interrupt_injection != 0 && run.if_flag != 0
    }
620
621 fn interrupt(&self, irq: u8) -> Result<()> {
626 if !self.ready_for_interrupt() {
627 return Err(Error::new(EAGAIN));
628 }
629
630 let interrupt = kvm_interrupt { irq: irq.into() };
631 let ret = unsafe { ioctl_with_ref(self, KVM_INTERRUPT, &interrupt) };
635 if ret == 0 {
636 Ok(())
637 } else {
638 errno_result()
639 }
640 }
641
642 fn inject_nmi(&self) -> Result<()> {
643 let ret = unsafe { ioctl(self, KVM_NMI) };
646 if ret == 0 {
647 Ok(())
648 } else {
649 errno_result()
650 }
651 }
652
653 fn get_regs(&self) -> Result<Regs> {
654 let mut regs: kvm_regs = Default::default();
655 let ret = {
656 unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS, &mut regs) }
661 };
662 if ret == 0 {
663 Ok(Regs::from(®s))
664 } else {
665 errno_result()
666 }
667 }
668
669 fn set_regs(&self, regs: &Regs) -> Result<()> {
670 let regs = kvm_regs::from(regs);
671 let ret = {
672 unsafe { ioctl_with_ref(self, KVM_SET_REGS, ®s) }
677 };
678 if ret == 0 {
679 Ok(())
680 } else {
681 errno_result()
682 }
683 }
684
685 fn get_sregs(&self) -> Result<Sregs> {
686 let mut regs: kvm_sregs = Default::default();
687 let ret = {
688 unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) }
693 };
694 if ret == 0 {
695 Ok(Sregs::from(®s))
696 } else {
697 errno_result()
698 }
699 }
700
    /// Writes the special registers (KVM_SET_SREGS).
    ///
    /// KVM's `kvm_sregs` carries fields the portable `Sregs` type does not
    /// (e.g. apic_base, interrupt_bitmap), so the current kernel values are
    /// fetched first and only the fields `Sregs` covers are overwritten.
    fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
        let mut kvm_sregs: kvm_sregs = Default::default();
        // SAFETY: the kernel only writes into the struct we provide.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut kvm_sregs) };
        if ret != 0 {
            return errno_result();
        }

        kvm_sregs.cs = kvm_segment::from(&sregs.cs);
        kvm_sregs.ds = kvm_segment::from(&sregs.ds);
        kvm_sregs.es = kvm_segment::from(&sregs.es);
        kvm_sregs.fs = kvm_segment::from(&sregs.fs);
        kvm_sregs.gs = kvm_segment::from(&sregs.gs);
        kvm_sregs.ss = kvm_segment::from(&sregs.ss);
        kvm_sregs.tr = kvm_segment::from(&sregs.tr);
        kvm_sregs.ldt = kvm_segment::from(&sregs.ldt);
        kvm_sregs.gdt = kvm_dtable::from(&sregs.gdt);
        kvm_sregs.idt = kvm_dtable::from(&sregs.idt);
        kvm_sregs.cr0 = sregs.cr0;
        kvm_sregs.cr2 = sregs.cr2;
        kvm_sregs.cr3 = sregs.cr3;
        kvm_sregs.cr4 = sregs.cr4;
        kvm_sregs.cr8 = sregs.cr8;
        kvm_sregs.efer = sregs.efer;

        // SAFETY: the kernel only reads the struct we pass by reference.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &kvm_sregs) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
740
741 fn get_fpu(&self) -> Result<Fpu> {
742 let mut fpu: kvm_fpu = Default::default();
743 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU, &mut fpu) };
747 if ret == 0 {
748 Ok(Fpu::from(&fpu))
749 } else {
750 errno_result()
751 }
752 }
753
754 fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
755 let fpu = kvm_fpu::from(fpu);
756 let ret = {
757 unsafe { ioctl_with_ref(self, KVM_SET_FPU, &fpu) }
760 };
761 if ret == 0 {
762 Ok(())
763 } else {
764 errno_result()
765 }
766 }
767
    /// Reads the XSAVE area, choosing KVM_GET_XSAVE2 when the host reports an
    /// area larger than the legacy 4 KiB buffer.
    fn get_xsave(&self) -> Result<Xsave> {
        let size = self.xsave_size()?;
        let ioctl_nr = if size > KVM_XSAVE_MAX_SIZE {
            KVM_GET_XSAVE2
        } else {
            KVM_GET_XSAVE
        };
        let mut xsave = Xsave::new(size);

        // SAFETY: the buffer was allocated with the exact size the kernel
        // reported for this VM's XSAVE area.
        let ret = unsafe { ioctl_with_mut_ptr(self, ioctl_nr, xsave.as_mut_ptr()) };
        if ret == 0 {
            Ok(xsave)
        } else {
            errno_result()
        }
    }
788
    /// Writes the XSAVE area (KVM_SET_XSAVE).
    fn set_xsave(&self, xsave: &Xsave) -> Result<()> {
        let size = self.xsave_size()?;
        // Reject buffers that don't match the size the kernel expects; the
        // ioctl would otherwise read past the snapshot data.
        if xsave.len() != size {
            return Err(Error::new(EIO));
        }

        // SAFETY: the buffer length was verified against the kernel-reported
        // XSAVE area size above.
        let ret = unsafe { ioctl_with_ptr(self, KVM_SET_XSAVE, xsave.as_ptr()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
809
    /// Captures KVM-specific vCPU state — pending events plus, when the host
    /// supports it, nested-virtualization state — as a serializable snapshot.
    fn get_hypervisor_specific_state(&self) -> Result<AnySnapshot> {
        let mut vcpu_evts: kvm_vcpu_events = Default::default();
        // SAFETY: the kernel only writes into the struct we provide.
        let ret = { unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS, &mut vcpu_evts) } };
        if ret != 0 {
            return errno_result();
        }
        let interrupts = VcpuEvents::from(&vcpu_evts);
        // KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE) reports the buffer size
        // needed for nested state, or 0 when unsupported.
        // SAFETY: passes a plain value; no memory is shared with the kernel.
        let ret =
            unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE as u64) };
        if ret < 0 {
            return errno_result();
        }
        let nested_state = if ret == 0 {
            // Nested state unsupported: snapshot an empty blob.
            Vec::new()
        } else {
            let mut nested_state: Vec<u8> = vec![0; ret as usize];
            let nested_state_ptr = nested_state.as_ptr() as *mut kvm_nested_state;
            assert!(nested_state_ptr.is_aligned());
            // SAFETY: alignment was asserted above and the Vec is `ret` bytes
            // long, covering the header that holds `size`.
            unsafe {
                (*nested_state_ptr).size = ret as u32;
            }
            assert!(nested_state.as_ptr().is_aligned());
            // SAFETY: the buffer's declared `size` matches its allocation, so
            // the kernel writes only within the Vec.
            let ret = unsafe {
                ioctl_with_mut_ptr(self, KVM_GET_NESTED_STATE, nested_state.as_mut_ptr())
            };
            if ret < 0 {
                return errno_result();
            }
            nested_state
        };
        AnySnapshot::to_any(HypervisorState {
            interrupts,
            nested_state,
        })
        .map_err(|e| {
            error!("failed to serialize hypervisor state: {:?}", e);
            Error::new(EIO)
        })
    }
866
867 fn set_hypervisor_specific_state(&self, data: AnySnapshot) -> Result<()> {
868 let hypervisor_state = AnySnapshot::from_any::<HypervisorState>(data).map_err(|e| {
869 error!("failed to deserialize hypervisor_state: {:?}", e);
870 Error::new(EIO)
871 })?;
872 let vcpu_events = kvm_vcpu_events::from(&hypervisor_state.interrupts);
873 let ret = {
874 unsafe { ioctl_with_ref(self, KVM_SET_VCPU_EVENTS, &vcpu_events) }
879 };
880 if ret != 0 {
881 return errno_result();
882 }
883 if hypervisor_state.nested_state.is_empty() {
884 return Ok(());
885 }
886 unsafe {
895 let vec_len = hypervisor_state.nested_state.len();
896 assert!(
897 (hypervisor_state.nested_state.as_ptr() as *const kvm_nested_state).is_aligned()
898 );
899 if (*(hypervisor_state.nested_state.as_ptr() as *const kvm_nested_state)).size
900 > vec_len as u32
901 {
902 error!("Invalued nested state data, size larger than vec allocated.");
903 return Err(Error::new(EINVAL));
904 }
905 }
906 let ret = unsafe {
911 ioctl_with_ptr(
912 self,
913 KVM_SET_NESTED_STATE,
914 hypervisor_state.nested_state.as_ptr(),
915 )
916 };
917 if ret == 0 {
918 Ok(())
919 } else {
920 errno_result()
921 }
922 }
923
924 fn get_debugregs(&self) -> Result<DebugRegs> {
925 let mut regs: kvm_debugregs = Default::default();
926 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS, &mut regs) };
930 if ret == 0 {
931 Ok(DebugRegs::from(®s))
932 } else {
933 errno_result()
934 }
935 }
936
937 fn set_debugregs(&self, dregs: &DebugRegs) -> Result<()> {
938 let dregs = kvm_debugregs::from(dregs);
939 let ret = {
940 unsafe { ioctl_with_ref(self, KVM_SET_DEBUGREGS, &dregs) }
943 };
944 if ret == 0 {
945 Ok(())
946 } else {
947 errno_result()
948 }
949 }
950
951 fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
952 let mut regs: kvm_xcrs = Default::default();
953 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS, &mut regs) };
957 if ret < 0 {
958 return errno_result();
959 }
960
961 Ok(regs
962 .xcrs
963 .iter()
964 .take(regs.nr_xcrs as usize)
965 .map(|kvm_xcr| (kvm_xcr.xcr, kvm_xcr.value))
966 .collect())
967 }
968
969 fn set_xcr(&self, xcr_index: u32, value: u64) -> Result<()> {
970 let mut kvm_xcr = kvm_xcrs {
971 nr_xcrs: 1,
972 ..Default::default()
973 };
974 kvm_xcr.xcrs[0].xcr = xcr_index;
975 kvm_xcr.xcrs[0].value = value;
976
977 let ret = {
978 unsafe { ioctl_with_ref(self, KVM_SET_XCRS, &kvm_xcr) }
981 };
982 if ret == 0 {
983 Ok(())
984 } else {
985 errno_result()
986 }
987 }
988
    /// Reads a single MSR (KVM_GET_MSRS with a one-entry list).
    fn get_msr(&self, msr_index: u32) -> Result<u64> {
        let mut msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(1);
        msrs[0].nmsrs = 1;

        // SAFETY: the buffer was allocated with room for one trailing entry.
        unsafe {
            let msr_entries = msrs[0].entries.as_mut_slice(1);
            msr_entries[0].index = msr_index;
        }

        let ret = {
            // SAFETY: nmsrs == 1 matches the allocation, so the kernel stays
            // within the buffer.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut msrs[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // KVM_GET_MSRS returns the number of entries it filled; anything
        // other than 1 here means the MSR was not read.
        if ret != 1 {
            return Err(base::Error::new(libc::ENOENT));
        }

        // SAFETY: the kernel reported exactly one valid entry.
        let value = unsafe {
            let msr_entries = msrs[0].entries.as_slice(1);
            msr_entries[0].data
        };

        Ok(value)
    }
1022
    /// Reads every MSR in the host's supported index list into a map.
    ///
    /// Fails with EPERM if the kernel filled fewer entries than requested,
    /// i.e. some MSR could not be read.
    fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
        let msr_index_list = self.kvm.get_msr_index_list()?;
        let mut kvm_msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(msr_index_list.len());
        kvm_msrs[0].nmsrs = msr_index_list.len() as u32;
        // SAFETY: the buffer holds exactly msr_index_list.len() entries, and
        // we only write indices into that many slots.
        unsafe {
            kvm_msrs[0]
                .entries
                .as_mut_slice(msr_index_list.len())
                .iter_mut()
                .zip(msr_index_list.iter())
                .for_each(|(msr_entry, msr_index)| msr_entry.index = *msr_index);
        }

        let ret = {
            // SAFETY: nmsrs matches the allocation, so the kernel stays
            // within the buffer.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut kvm_msrs[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // The ioctl's return value is the count of entries actually filled.
        let count = ret as usize;
        if count != msr_index_list.len() {
            error!(
                "failed to get all MSRs: requested {}, got {}",
                msr_index_list.len(),
                count,
            );
            return Err(base::Error::new(libc::EPERM));
        }

        // SAFETY: the kernel filled `count` entries within the allocation.
        let msrs = unsafe {
            BTreeMap::from_iter(
                kvm_msrs[0]
                    .entries
                    .as_slice(count)
                    .iter()
                    .map(|kvm_msr| (kvm_msr.index, kvm_msr.data)),
            )
        };

        Ok(msrs)
    }
1073
    /// Writes a single MSR (KVM_SET_MSRS with a one-entry list).
    fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
        let mut kvm_msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(1);
        kvm_msrs[0].nmsrs = 1;

        // SAFETY: the buffer was allocated with room for one trailing entry.
        unsafe {
            let msr_entries = kvm_msrs[0].entries.as_mut_slice(1);
            msr_entries[0].index = msr_index;
            msr_entries[0].data = value;
        }

        let ret = {
            // SAFETY: nmsrs == 1 matches the allocation; the kernel only
            // reads the buffer.
            unsafe { ioctl_with_ref(self, KVM_SET_MSRS, &kvm_msrs[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // The return value is the number of MSRs successfully written; any
        // value other than 1 means the write did not take effect.
        if ret != 1 {
            error!("failed to set MSR {:#x} to {:#x}", msr_index, value);
            return Err(base::Error::new(libc::EPERM));
        }

        Ok(())
    }
1102
1103 fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
1104 let cpuid = KvmCpuId::from(cpuid);
1105 let ret = {
1106 unsafe { ioctl_with_ptr(self, KVM_SET_CPUID2, cpuid.as_ptr()) }
1109 };
1110 if ret == 0 {
1111 Ok(())
1112 } else {
1113 errno_result()
1114 }
1115 }
1116
    /// Configures hardware breakpoints and optional single-stepping
    /// (KVM_SET_GUEST_DEBUG).
    ///
    /// At most 4 breakpoint addresses are supported, matching the number of
    /// x86 debug-address registers DR0–DR3.
    fn set_guest_debug(&self, addrs: &[GuestAddress], enable_singlestep: bool) -> Result<()> {
        use kvm_sys::*;
        let mut dbg: kvm_guest_debug = Default::default();

        if addrs.len() > 4 {
            error!(
                "Support 4 breakpoints at most but {} addresses are passed",
                addrs.len()
            );
            return Err(base::Error::new(libc::EINVAL));
        }

        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        if enable_singlestep {
            dbg.control |= KVM_GUESTDBG_SINGLESTEP;
        }

        // DR7 base value 0x0600 sets bits 9 and 10 (GE plus the always-one
        // reserved bit per the x86 DR7 layout — confirm against the SDM);
        // per-slot enable bits are OR'd in below.
        dbg.arch.debugreg[7] = 0x0600;

        for (i, addr) in addrs.iter().enumerate() {
            dbg.arch.debugreg[i] = addr.0;
            // Set the global breakpoint-enable bit (G0..G3) for this slot.
            dbg.arch.debugreg[7] |= 2 << (i * 2);
        }

        let ret = {
            // SAFETY: the kernel only reads the struct we pass by reference.
            unsafe { ioctl_with_ref(self, KVM_SET_GUEST_DEBUG, &dbg) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
1156
    /// CPUID exits are not expected with KVM (CPUID is handled in-kernel),
    /// so this always fails.
    fn handle_cpuid(&mut self, _entry: &CpuIdEntry) -> Result<()> {
        Err(Error::new(ENXIO))
    }
1161
    /// No-op on KVM; guest time is presumably restored through the clock/TSC
    /// snapshot paths instead — confirm against the snapshot restore flow.
    fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, _tsc_offset: u64) -> Result<()> {
        Ok(())
    }
1166}
1167
impl KvmVcpu {
    /// Reads the local APIC register page (KVM_GET_LAPIC).
    pub fn get_lapic(&self) -> Result<kvm_lapic_state> {
        let mut klapic: kvm_lapic_state = Default::default();

        let ret = {
            // SAFETY: the kernel only writes into the struct we provide.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_LAPIC, &mut klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(klapic)
    }

    /// Writes the local APIC register page (KVM_SET_LAPIC).
    pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> {
        let ret = {
            // SAFETY: the kernel only reads the struct we pass by reference.
            unsafe { ioctl_with_ref(self, KVM_SET_LAPIC, klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Reads the IA32_APICBASE MSR (APIC base address and mode flags).
    pub fn get_apic_base(&self) -> Result<u64> {
        self.get_msr(MSR_IA32_APICBASE)
    }

    /// Writes the IA32_APICBASE MSR.
    pub fn set_apic_base(&self, apic_base: u64) -> Result<()> {
        self.set_msr(MSR_IA32_APICBASE, apic_base)
    }

    /// Reads the pending-interrupt bitmap out of the special registers.
    pub fn get_interrupt_bitmap(&self) -> Result<[u64; 4usize]> {
        let mut regs: kvm_sregs = Default::default();
        // SAFETY: the kernel only writes into the struct we provide.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            Ok(regs.interrupt_bitmap)
        } else {
            errno_result()
        }
    }

    /// Writes the pending-interrupt bitmap, using a read-modify-write of the
    /// full sregs so every other field is preserved.
    pub fn set_interrupt_bitmap(&self, interrupt_bitmap: [u64; 4usize]) -> Result<()> {
        let mut regs: kvm_sregs = Default::default();
        // SAFETY: the kernel only writes into the struct we provide.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            regs.interrupt_bitmap = interrupt_bitmap;
            // SAFETY: the kernel only reads the struct we pass by reference.
            let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &regs) };
            if ret >= 0 {
                Ok(())
            } else {
                errno_result()
            }
        } else {
            errno_result()
        }
    }
}
1263
1264impl<'a> From<&'a KvmCpuId> for CpuId {
1265 fn from(kvm_cpuid: &'a KvmCpuId) -> CpuId {
1266 let kvm_entries = kvm_cpuid.entries_slice();
1267 let mut cpu_id_entries = Vec::with_capacity(kvm_entries.len());
1268
1269 for entry in kvm_entries {
1270 let cpu_id_entry = CpuIdEntry {
1271 function: entry.function,
1272 index: entry.index,
1273 flags: entry.flags,
1274 cpuid: CpuidResult {
1275 eax: entry.eax,
1276 ebx: entry.ebx,
1277 ecx: entry.ecx,
1278 edx: entry.edx,
1279 },
1280 };
1281 cpu_id_entries.push(cpu_id_entry)
1282 }
1283 CpuId { cpu_id_entries }
1284 }
1285}
1286
1287impl From<&CpuId> for KvmCpuId {
1288 fn from(cpuid: &CpuId) -> KvmCpuId {
1289 let mut kvm = KvmCpuId::new(cpuid.cpu_id_entries.len());
1290 let entries = kvm.mut_entries_slice();
1291 for (i, &e) in cpuid.cpu_id_entries.iter().enumerate() {
1292 entries[i] = kvm_cpuid_entry2 {
1293 function: e.function,
1294 index: e.index,
1295 flags: e.flags,
1296 eax: e.cpuid.eax,
1297 ebx: e.cpuid.ebx,
1298 ecx: e.cpuid.ecx,
1299 edx: e.cpuid.edx,
1300 ..Default::default()
1301 };
1302 }
1303 kvm
1304 }
1305}
1306
/// Builds a `kvm_clock_data` from the portable clock state; KVM's flags and
/// padding fields are left zeroed.
impl From<&ClockState> for kvm_clock_data {
    fn from(state: &ClockState) -> Self {
        kvm_clock_data {
            clock: state.clock,
            ..Default::default()
        }
    }
}
1315
/// Extracts the portable clock state from `kvm_clock_data`; only the raw
/// clock value is carried over.
impl From<&kvm_clock_data> for ClockState {
    fn from(clock_data: &kvm_clock_data) -> Self {
        ClockState {
            clock: clock_data.clock,
        }
    }
}
1323
/// Converts KVM's C PIC state into the portable representation, mapping u8
/// flag fields to bools.
impl From<&kvm_pic_state> for PicState {
    fn from(item: &kvm_pic_state) -> Self {
        PicState {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select != 0,
            poll: item.poll != 0,
            special_mask: item.special_mask != 0,
            init_state: item.init_state.into(),
            auto_eoi: item.auto_eoi != 0,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi != 0,
            special_fully_nested_mode: item.special_fully_nested_mode != 0,
            // KVM calls the 4-byte-ICW flag `init4`.
            use_4_byte_icw: item.init4 != 0,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}
1346
/// Converts the portable PIC state back to KVM's C layout, mapping bools to
/// u8 flag fields.
impl From<&PicState> for kvm_pic_state {
    fn from(item: &PicState) -> Self {
        kvm_pic_state {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select as u8,
            poll: item.poll as u8,
            special_mask: item.special_mask as u8,
            init_state: item.init_state as u8,
            auto_eoi: item.auto_eoi as u8,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi as u8,
            special_fully_nested_mode: item.special_fully_nested_mode as u8,
            // KVM calls the 4-byte-ICW flag `init4`.
            init4: item.use_4_byte_icw as u8,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}
1369
/// Converts KVM's IOAPIC state into the portable representation, decoding
/// every redirection-table entry.
impl From<&kvm_ioapic_state> for IoapicState {
    fn from(item: &kvm_ioapic_state) -> Self {
        let mut state = IoapicState {
            base_address: item.base_address,
            // IOREGSEL is an 8-bit register; the u32 storage is truncated.
            ioregsel: item.ioregsel as u8,
            ioapicid: item.id,
            current_interrupt_level_bitmap: item.irr,
            redirect_table: [IoapicRedirectionTableEntry::default(); NUM_IOAPIC_PINS],
        };
        for (in_state, out_state) in item.redirtbl.iter().zip(state.redirect_table.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}
1385
/// Packs a redirection-table entry into the raw 64-bit `bits` form KVM uses.
impl From<&IoapicRedirectionTableEntry> for kvm_ioapic_state__bindgen_ty_1 {
    fn from(item: &IoapicRedirectionTableEntry) -> Self {
        kvm_ioapic_state__bindgen_ty_1 {
            // get(0, 64) returns the whole entry as one 64-bit value.
            bits: item.get(0, 64),
        }
    }
}
1395
/// Decodes KVM's raw 64-bit redirection-table entry into the structured form.
impl From<&kvm_ioapic_state__bindgen_ty_1> for IoapicRedirectionTableEntry {
    fn from(item: &kvm_ioapic_state__bindgen_ty_1) -> Self {
        let mut entry = IoapicRedirectionTableEntry::default();
        // SAFETY: every variant of this union is plain 64-bit data, so
        // reading `bits` is always valid.
        entry.set(0, 64, unsafe { item.bits });
        entry
    }
}
1406
/// Converts the portable IOAPIC state back to KVM's C layout, encoding every
/// redirection-table entry.
impl From<&IoapicState> for kvm_ioapic_state {
    fn from(item: &IoapicState) -> Self {
        let mut state = kvm_ioapic_state {
            base_address: item.base_address,
            ioregsel: item.ioregsel as u32,
            id: item.ioapicid,
            irr: item.current_interrupt_level_bitmap,
            ..Default::default()
        };
        for (in_state, out_state) in item.redirect_table.iter().zip(state.redirtbl.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}
1422
1423impl From<&LapicState> for kvm_lapic_state {
1424 fn from(item: &LapicState) -> Self {
1425 let mut state = kvm_lapic_state::default();
1426 for (reg, value) in item.regs.iter().enumerate() {
1428 let reg_offset = 16 * reg;
1430 let regs_slice = &mut state.regs[reg_offset..reg_offset + 4];
1431
1432 for (i, v) in value.to_le_bytes().iter().enumerate() {
1435 regs_slice[i] = *v as i8;
1436 }
1437 }
1438 state
1439 }
1440}
1441
1442impl From<&kvm_lapic_state> for LapicState {
1443 fn from(item: &kvm_lapic_state) -> Self {
1444 let mut state = LapicState { regs: [0; 64] };
1445 for reg in 0..64 {
1447 let reg_offset = 16 * reg;
1449
1450 let reg_slice = &item.regs[reg_offset..reg_offset + 4];
1452 let mut bytes = [0u8; 4];
1453 for i in 0..4 {
1454 bytes[i] = reg_slice[i] as u8;
1455 }
1456 state.regs[reg] = u32::from_le_bytes(bytes);
1457 }
1458 state
1459 }
1460}
1461
/// Converts the portable PIT state back to KVM's C layout; reserved fields
/// are left zeroed.
impl From<&PitState> for kvm_pit_state2 {
    fn from(item: &PitState) -> Self {
        kvm_pit_state2 {
            channels: [
                kvm_pit_channel_state::from(&item.channels[0]),
                kvm_pit_channel_state::from(&item.channels[1]),
                kvm_pit_channel_state::from(&item.channels[2]),
            ],
            flags: item.flags,
            ..Default::default()
        }
    }
}
1475
/// Converts KVM's PIT state into the portable representation, channel by
/// channel.
impl From<&kvm_pit_state2> for PitState {
    fn from(item: &kvm_pit_state2) -> Self {
        PitState {
            channels: [
                PitChannelState::from(&item.channels[0]),
                PitChannelState::from(&item.channels[1]),
                PitChannelState::from(&item.channels[2]),
            ],
            flags: item.flags,
        }
    }
}
1488
/// Converts one portable PIT channel back to KVM's C layout.
impl From<&PitChannelState> for kvm_pit_channel_state {
    fn from(item: &PitChannelState) -> Self {
        kvm_pit_channel_state {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched as u8,
            status_latched: item.status_latched as u8,
            status: item.status,
            read_state: item.read_state as u8,
            write_state: item.write_state as u8,
            // Only the low byte of the 16-bit reload value is kept here —
            // presumably KVM's write_latch only models the partially-written
            // byte; confirm against the reverse conversion.
            write_latch: item.reload_value as u8,
            rw_mode: item.rw_mode as u8,
            mode: item.mode,
            bcd: item.bcd as u8,
            gate: item.gate as u8,
            // Portable form stores the load time unsigned; KVM uses i64.
            count_load_time: item.count_load_time as i64,
        }
    }
}
1509
/// Converts one of KVM's PIT channels into the portable representation.
impl From<&kvm_pit_channel_state> for PitChannelState {
    fn from(item: &kvm_pit_channel_state) -> Self {
        PitChannelState {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched.into(),
            status_latched: item.status_latched != 0,
            status: item.status,
            read_state: item.read_state.into(),
            write_state: item.write_state.into(),
            // KVM's write_latch is a single byte; widened into the 16-bit
            // reload value here (upper byte zero).
            reload_value: item.write_latch as u16,
            rw_mode: item.rw_mode.into(),
            mode: item.mode,
            bcd: item.bcd != 0,
            gate: item.gate != 0,
            // KVM stores the load time signed; the portable form uses u64.
            count_load_time: item.count_load_time as u64,
        }
    }
}
1530
1531pub(super) fn chip_to_kvm_chip(chip: IrqSourceChip) -> u32 {
1535 match chip {
1536 IrqSourceChip::PicPrimary => KVM_IRQCHIP_PIC_MASTER,
1537 IrqSourceChip::PicSecondary => KVM_IRQCHIP_PIC_SLAVE,
1538 IrqSourceChip::Ioapic => KVM_IRQCHIP_IOAPIC,
1539 _ => {
1540 error!("Invalid IrqChipSource for X86 {:?}", chip);
1541 0
1542 }
1543 }
1544}
1545
1546impl From<&kvm_regs> for Regs {
1547 fn from(r: &kvm_regs) -> Self {
1548 Regs {
1549 rax: r.rax,
1550 rbx: r.rbx,
1551 rcx: r.rcx,
1552 rdx: r.rdx,
1553 rsi: r.rsi,
1554 rdi: r.rdi,
1555 rsp: r.rsp,
1556 rbp: r.rbp,
1557 r8: r.r8,
1558 r9: r.r9,
1559 r10: r.r10,
1560 r11: r.r11,
1561 r12: r.r12,
1562 r13: r.r13,
1563 r14: r.r14,
1564 r15: r.r15,
1565 rip: r.rip,
1566 rflags: r.rflags,
1567 }
1568 }
1569}
1570
1571impl From<&Regs> for kvm_regs {
1572 fn from(r: &Regs) -> Self {
1573 kvm_regs {
1574 rax: r.rax,
1575 rbx: r.rbx,
1576 rcx: r.rcx,
1577 rdx: r.rdx,
1578 rsi: r.rsi,
1579 rdi: r.rdi,
1580 rsp: r.rsp,
1581 rbp: r.rbp,
1582 r8: r.r8,
1583 r9: r.r9,
1584 r10: r.r10,
1585 r11: r.r11,
1586 r12: r.r12,
1587 r13: r.r13,
1588 r14: r.r14,
1589 r15: r.r15,
1590 rip: r.rip,
1591 rflags: r.rflags,
1592 }
1593 }
1594}
1595
1596impl From<&VcpuEvents> for kvm_vcpu_events {
1597 fn from(ve: &VcpuEvents) -> Self {
1598 let mut kvm_ve: kvm_vcpu_events = Default::default();
1599
1600 kvm_ve.exception.injected = ve.exception.injected as u8;
1601 kvm_ve.exception.nr = ve.exception.nr;
1602 kvm_ve.exception.has_error_code = ve.exception.has_error_code as u8;
1603 if let Some(pending) = ve.exception.pending {
1604 kvm_ve.exception.pending = pending as u8;
1605 if ve.exception_payload.is_some() {
1606 kvm_ve.exception_has_payload = true as u8;
1607 }
1608 kvm_ve.exception_payload = ve.exception_payload.unwrap_or(0);
1609 kvm_ve.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
1610 }
1611 kvm_ve.exception.error_code = ve.exception.error_code;
1612
1613 kvm_ve.interrupt.injected = ve.interrupt.injected as u8;
1614 kvm_ve.interrupt.nr = ve.interrupt.nr;
1615 kvm_ve.interrupt.soft = ve.interrupt.soft as u8;
1616 if let Some(shadow) = ve.interrupt.shadow {
1617 kvm_ve.interrupt.shadow = shadow;
1618 kvm_ve.flags |= KVM_VCPUEVENT_VALID_SHADOW;
1619 }
1620
1621 kvm_ve.nmi.injected = ve.nmi.injected as u8;
1622 if let Some(pending) = ve.nmi.pending {
1623 kvm_ve.nmi.pending = pending as u8;
1624 kvm_ve.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
1625 }
1626 kvm_ve.nmi.masked = ve.nmi.masked as u8;
1627
1628 if let Some(sipi_vector) = ve.sipi_vector {
1629 kvm_ve.sipi_vector = sipi_vector;
1630 kvm_ve.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
1631 }
1632
1633 if let Some(smm) = ve.smi.smm {
1634 kvm_ve.smi.smm = smm as u8;
1635 kvm_ve.flags |= KVM_VCPUEVENT_VALID_SMM;
1636 }
1637 kvm_ve.smi.pending = ve.smi.pending as u8;
1638 kvm_ve.smi.smm_inside_nmi = ve.smi.smm_inside_nmi as u8;
1639 kvm_ve.smi.latched_init = ve.smi.latched_init;
1640
1641 if let Some(pending) = ve.triple_fault.pending {
1642 kvm_ve.triple_fault.pending = pending as u8;
1643 kvm_ve.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
1644 }
1645 kvm_ve
1646 }
1647}
1648
1649impl From<&kvm_vcpu_events> for VcpuEvents {
1650 fn from(ve: &kvm_vcpu_events) -> Self {
1651 let exception = VcpuExceptionState {
1652 injected: ve.exception.injected != 0,
1653 nr: ve.exception.nr,
1654 has_error_code: ve.exception.has_error_code != 0,
1655 pending: if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
1656 Some(ve.exception.pending != 0)
1657 } else {
1658 None
1659 },
1660 error_code: ve.exception.error_code,
1661 };
1662
1663 let interrupt = VcpuInterruptState {
1664 injected: ve.interrupt.injected != 0,
1665 nr: ve.interrupt.nr,
1666 soft: ve.interrupt.soft != 0,
1667 shadow: if ve.flags & KVM_VCPUEVENT_VALID_SHADOW != 0 {
1668 Some(ve.interrupt.shadow)
1669 } else {
1670 None
1671 },
1672 };
1673
1674 let nmi = VcpuNmiState {
1675 injected: ve.interrupt.injected != 0,
1676 pending: if ve.flags & KVM_VCPUEVENT_VALID_NMI_PENDING != 0 {
1677 Some(ve.nmi.pending != 0)
1678 } else {
1679 None
1680 },
1681 masked: ve.nmi.masked != 0,
1682 };
1683
1684 let sipi_vector = if ve.flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR != 0 {
1685 Some(ve.sipi_vector)
1686 } else {
1687 None
1688 };
1689
1690 let smi = VcpuSmiState {
1691 smm: if ve.flags & KVM_VCPUEVENT_VALID_SMM != 0 {
1692 Some(ve.smi.smm != 0)
1693 } else {
1694 None
1695 },
1696 pending: ve.smi.pending != 0,
1697 smm_inside_nmi: ve.smi.smm_inside_nmi != 0,
1698 latched_init: ve.smi.latched_init,
1699 };
1700
1701 let triple_fault = VcpuTripleFaultState {
1702 pending: if ve.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT != 0 {
1703 Some(ve.triple_fault.pending != 0)
1704 } else {
1705 None
1706 },
1707 };
1708
1709 let exception_payload = if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
1710 Some(ve.exception_payload)
1711 } else {
1712 None
1713 };
1714
1715 VcpuEvents {
1716 exception,
1717 interrupt,
1718 nmi,
1719 sipi_vector,
1720 smi,
1721 triple_fault,
1722 exception_payload,
1723 }
1724 }
1725}
1726
1727impl From<&kvm_segment> for Segment {
1728 fn from(s: &kvm_segment) -> Self {
1729 Segment {
1730 base: s.base,
1731 limit_bytes: s.limit,
1732 selector: s.selector,
1733 type_: s.type_,
1734 present: s.present,
1735 dpl: s.dpl,
1736 db: s.db,
1737 s: s.s,
1738 l: s.l,
1739 g: s.g,
1740 avl: s.avl,
1741 }
1742 }
1743}
1744
1745impl From<&Segment> for kvm_segment {
1746 fn from(s: &Segment) -> Self {
1747 kvm_segment {
1748 base: s.base,
1749 limit: s.limit_bytes,
1750 selector: s.selector,
1751 type_: s.type_,
1752 present: s.present,
1753 dpl: s.dpl,
1754 db: s.db,
1755 s: s.s,
1756 l: s.l,
1757 g: s.g,
1758 avl: s.avl,
1759 unusable: match s.present {
1760 0 => 1,
1761 _ => 0,
1762 },
1763 ..Default::default()
1764 }
1765 }
1766}
1767
1768impl From<&kvm_dtable> for DescriptorTable {
1769 fn from(dt: &kvm_dtable) -> Self {
1770 DescriptorTable {
1771 base: dt.base,
1772 limit: dt.limit,
1773 }
1774 }
1775}
1776
1777impl From<&DescriptorTable> for kvm_dtable {
1778 fn from(dt: &DescriptorTable) -> Self {
1779 kvm_dtable {
1780 base: dt.base,
1781 limit: dt.limit,
1782 ..Default::default()
1783 }
1784 }
1785}
1786
1787impl From<&kvm_sregs> for Sregs {
1788 fn from(r: &kvm_sregs) -> Self {
1789 Sregs {
1790 cs: Segment::from(&r.cs),
1791 ds: Segment::from(&r.ds),
1792 es: Segment::from(&r.es),
1793 fs: Segment::from(&r.fs),
1794 gs: Segment::from(&r.gs),
1795 ss: Segment::from(&r.ss),
1796 tr: Segment::from(&r.tr),
1797 ldt: Segment::from(&r.ldt),
1798 gdt: DescriptorTable::from(&r.gdt),
1799 idt: DescriptorTable::from(&r.idt),
1800 cr0: r.cr0,
1801 cr2: r.cr2,
1802 cr3: r.cr3,
1803 cr4: r.cr4,
1804 cr8: r.cr8,
1805 efer: r.efer,
1806 }
1807 }
1808}
1809
1810impl From<&kvm_fpu> for Fpu {
1811 fn from(r: &kvm_fpu) -> Self {
1812 Fpu {
1813 fpr: FpuReg::from_16byte_arrays(&r.fpr),
1814 fcw: r.fcw,
1815 fsw: r.fsw,
1816 ftwx: r.ftwx,
1817 last_opcode: r.last_opcode,
1818 last_ip: r.last_ip,
1819 last_dp: r.last_dp,
1820 xmm: r.xmm,
1821 mxcsr: r.mxcsr,
1822 }
1823 }
1824}
1825
1826impl From<&Fpu> for kvm_fpu {
1827 fn from(r: &Fpu) -> Self {
1828 kvm_fpu {
1829 fpr: FpuReg::to_16byte_arrays(&r.fpr),
1830 fcw: r.fcw,
1831 fsw: r.fsw,
1832 ftwx: r.ftwx,
1833 last_opcode: r.last_opcode,
1834 last_ip: r.last_ip,
1835 last_dp: r.last_dp,
1836 xmm: r.xmm,
1837 mxcsr: r.mxcsr,
1838 ..Default::default()
1839 }
1840 }
1841}
1842
1843impl From<&kvm_debugregs> for DebugRegs {
1844 fn from(r: &kvm_debugregs) -> Self {
1845 DebugRegs {
1846 db: r.db,
1847 dr6: r.dr6,
1848 dr7: r.dr7,
1849 }
1850 }
1851}
1852
1853impl From<&DebugRegs> for kvm_debugregs {
1854 fn from(r: &DebugRegs) -> Self {
1855 kvm_debugregs {
1856 db: r.db,
1857 dr6: r.dr6,
1858 dr7: r.dr7,
1859 ..Default::default()
1860 }
1861 }
1862}
1863
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trip test for the vCPU event conversions:
    // kvm_vcpu_events -> VcpuEvents -> kvm_vcpu_events.
    //
    // The flags below deliberately omit KVM_VCPUEVENT_VALID_TRIPLE_FAULT, so
    // the Option-typed `triple_fault.pending` must come back as `None` and
    // restore to 0.
    //
    // NOTE(review): `nmi.injected` and `interrupt.injected` are both set to 1
    // here, so this test cannot detect a conversion that confuses the two
    // fields; distinct values would make the round-trip checks stronger.
    #[test]
    fn vcpu_event_to_from() {
        let mut kvm_ve: kvm_vcpu_events = Default::default();
        // Exception state, guarded by KVM_VCPUEVENT_VALID_PAYLOAD below.
        kvm_ve.exception.injected = 1;
        kvm_ve.exception.nr = 65;
        kvm_ve.exception.has_error_code = 1;
        kvm_ve.exception.error_code = 110;
        kvm_ve.exception.pending = 1;

        // Interrupt state; `shadow` is guarded by KVM_VCPUEVENT_VALID_SHADOW.
        kvm_ve.interrupt.injected = 1;
        kvm_ve.interrupt.nr = 100;
        kvm_ve.interrupt.soft = 1;
        kvm_ve.interrupt.shadow = 114;

        // NMI state; `pending` is guarded by KVM_VCPUEVENT_VALID_NMI_PENDING.
        kvm_ve.nmi.injected = 1;
        kvm_ve.nmi.pending = 1;
        kvm_ve.nmi.masked = 0;

        kvm_ve.sipi_vector = 105;

        // SMI state; `smm` is guarded by KVM_VCPUEVENT_VALID_SMM.
        kvm_ve.smi.smm = 1;
        kvm_ve.smi.pending = 1;
        kvm_ve.smi.smm_inside_nmi = 1;
        kvm_ve.smi.latched_init = 100;

        kvm_ve.triple_fault.pending = 0;

        // Payload fields share KVM_VCPUEVENT_VALID_PAYLOAD with
        // `exception.pending`.
        kvm_ve.exception_payload = 33;
        kvm_ve.exception_has_payload = 1;

        kvm_ve.flags = 0
            | KVM_VCPUEVENT_VALID_PAYLOAD
            | KVM_VCPUEVENT_VALID_SMM
            | KVM_VCPUEVENT_VALID_NMI_PENDING
            | KVM_VCPUEVENT_VALID_SIPI_VECTOR
            | KVM_VCPUEVENT_VALID_SHADOW;

        // Forward conversion: flag-guarded fields become Some(..) because the
        // corresponding validity bits are set.
        let ve: VcpuEvents = VcpuEvents::from(&kvm_ve);
        assert_eq!(ve.exception.injected, true);
        assert_eq!(ve.exception.nr, 65);
        assert_eq!(ve.exception.has_error_code, true);
        assert_eq!(ve.exception.error_code, 110);
        assert_eq!(ve.exception.pending.unwrap(), true);

        assert_eq!(ve.interrupt.injected, true);
        assert_eq!(ve.interrupt.nr, 100);
        assert_eq!(ve.interrupt.soft, true);
        assert_eq!(ve.interrupt.shadow.unwrap(), 114);

        assert_eq!(ve.nmi.injected, true);
        assert_eq!(ve.nmi.pending.unwrap(), true);
        assert_eq!(ve.nmi.masked, false);

        assert_eq!(ve.sipi_vector.unwrap(), 105);

        assert_eq!(ve.smi.smm.unwrap(), true);
        assert_eq!(ve.smi.pending, true);
        assert_eq!(ve.smi.smm_inside_nmi, true);
        assert_eq!(ve.smi.latched_init, 100);

        // Triple-fault validity bit was not set, so the field must be None.
        assert_eq!(ve.triple_fault.pending, None);

        assert_eq!(ve.exception_payload.unwrap(), 33);

        // Reverse conversion must reproduce the original KVM structure.
        let kvm_ve_restored: kvm_vcpu_events = kvm_vcpu_events::from(&ve);
        assert_eq!(kvm_ve_restored.exception.injected, 1);
        assert_eq!(kvm_ve_restored.exception.nr, 65);
        assert_eq!(kvm_ve_restored.exception.has_error_code, 1);
        assert_eq!(kvm_ve_restored.exception.error_code, 110);
        assert_eq!(kvm_ve_restored.exception.pending, 1);

        assert_eq!(kvm_ve_restored.interrupt.injected, 1);
        assert_eq!(kvm_ve_restored.interrupt.nr, 100);
        assert_eq!(kvm_ve_restored.interrupt.soft, 1);
        assert_eq!(kvm_ve_restored.interrupt.shadow, 114);

        assert_eq!(kvm_ve_restored.nmi.injected, 1);
        assert_eq!(kvm_ve_restored.nmi.pending, 1);
        assert_eq!(kvm_ve_restored.nmi.masked, 0);

        assert_eq!(kvm_ve_restored.sipi_vector, 105);

        assert_eq!(kvm_ve_restored.smi.smm, 1);
        assert_eq!(kvm_ve_restored.smi.pending, 1);
        assert_eq!(kvm_ve_restored.smi.smm_inside_nmi, 1);
        assert_eq!(kvm_ve_restored.smi.latched_init, 100);

        assert_eq!(kvm_ve_restored.triple_fault.pending, 0);

        assert_eq!(kvm_ve_restored.exception_payload, 33);
        assert_eq!(kvm_ve_restored.exception_has_payload, 1);
    }
}