// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::BTreeMap;
use std::mem;
use std::result;

use base::warn;
use hypervisor::Sregs;
use hypervisor::VcpuX86_64;
use hypervisor::Vm;
use remain::sorted;
use thiserror::Error;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

use crate::gdt;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    /// Failed to get sregs for this cpu.
    #[error("failed to get sregs for this cpu: {0}")]
    GetSRegsIoctlFailed(base::Error),
    /// Failed to get base registers for this cpu.
    #[error("failed to get base registers for this cpu: {0}")]
    GettingRegistersIoctl(base::Error),
    /// Failed to set sregs for this cpu.
    #[error("failed to set sregs for this cpu: {0}")]
    SetSRegsIoctlFailed(base::Error),
    /// Failed to set base registers for this cpu.
    #[error("failed to set base registers for this cpu: {0}")]
    SettingRegistersIoctl(base::Error),
    /// Writing the GDT to RAM failed.
    #[error("writing the GDT to RAM failed")]
    WriteGDTFailure,
    /// Writing the IDT to RAM failed.
    #[error("writing the IDT to RAM failed")]
    WriteIDTFailure,
    /// Writing PDE to RAM failed.
    #[error("writing PDE to RAM failed")]
    WritePDEAddress,
    /// Writing PDPTE to RAM failed.
    #[error("writing PDPTE to RAM failed")]
    WritePDPTEAddress,
    /// Writing PML4 to RAM failed.
    #[error("writing PML4 to RAM failed")]
    WritePML4Address,
}

pub type Result<T> = result::Result<T, Error>;

const MTRR_MEMTYPE_UC: u8 = 0x0;
const MTRR_MEMTYPE_WB: u8 = 0x6;
const MTRR_VAR_VALID: u64 = 0x800;
const MTRR_ENABLE: u64 = 0x800;
const MTRR_PHYS_BASE_MSR: u32 = 0x200;
const MTRR_PHYS_MASK_MSR: u32 = 0x201;
const VAR_MTRR_NUM_MASK: u64 = 0xFF;

// Returns the largest power of two less than or equal to `data`, i.e. the
// value of its highest set bit. Equivalent to 1 << HighBitSet(data).
// `data` must be nonzero.
fn get_power_of_two(data: u64) -> u64 {
    1 << (64 - data.leading_zeros() - 1)
}

// Returns the maximum length suitable for an MTRR entry for the specified
// (base, len): the largest power-of-two portion of `len` to which `base` is
// aligned.
fn get_max_len(base: u64, len: u64) -> u64 {
    let mut ret = get_power_of_two(len);

    while base % ret != 0 {
        ret >>= 1;
    }

    ret
}
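
// For example (illustrative values): get_max_len(0x6000_0000, 0x4000_0000)
// starts from the 1 GiB power-of-two chunk but halves it to 0x2000_0000
// (512 MiB), because 0x6000_0000 is only aligned to 512 MiB.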

// For the specified (base, len), returns the list of (base, len) pairs that
// can be programmed into variable MTRR registers. MTRRs require each range's
// length to be a power of two and its base to be aligned to that length.
fn get_mtrr_pairs(base: u64, len: u64) -> Vec<(u64, u64)> {
    let mut vecs = Vec::new();

    let mut remains = len;
    let mut new = base;
    while remains != 0 {
        let max = get_max_len(new, remains);
        vecs.push((new, max));
        remains -= max;
        new += max;
    }

    vecs
}
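
// For example (illustrative values): decomposing base = 0xC000_0000,
// len = 0x6000_0000 yields (0xC000_0000, 0x4000_0000) followed by
// (0x1_0000_0000, 0x2000_0000); each chunk is a power of two aligned to its
// own length.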

/// Returns the number of variable MTRR entries supported by `vcpu`.
pub fn vcpu_supported_variable_mtrrs(vcpu: &dyn VcpuX86_64) -> usize {
    // Get the number of variable MTRRs from MSR_MTRRcap.
    match vcpu.get_msr(crate::msr_index::MSR_MTRRcap) {
        Ok(value) => (value & VAR_MTRR_NUM_MASK) as usize,
        Err(_e) => {
            warn!("failed to get MSR_MTRRcap, guests with passthrough devices may be very slow");
            0
        }
    }
}

/// Returns `true` if the given MSR `id` is an MTRR entry.
pub fn is_mtrr_msr(id: u32) -> bool {
    // Variable MTRR MSRs are pairs starting at 0x200 (MTRR_PHYS_BASE_MSR) / 0x201
    // (MTRR_PHYS_MASK_MSR) and extending up to 0xFF pairs at most.
    (id >= MTRR_PHYS_BASE_MSR && id <= MTRR_PHYS_BASE_MSR + 2 * VAR_MTRR_NUM_MASK as u32)
        || id == crate::msr_index::MSR_MTRRdefType
}

/// Returns the count of variable MTRR entries specified by the `msrs` map.
pub fn count_variable_mtrrs(msrs: &BTreeMap<u32, u64>) -> usize {
    // Each variable MTRR takes up two MSRs (base + mask), so divide by 2. This will also count the
    // MTRRdefType entry, but that is only one extra and the division truncates, so it won't affect
    // the final count.
    msrs.keys().filter(|&msr| is_mtrr_msr(*msr)).count() / 2
}

/// Inserts the MTRR configuration MSRs into `msrs`.
pub fn set_mtrr_msrs(msrs: &mut BTreeMap<u32, u64>, vm: &dyn Vm, pci_start: u64) {
    // Set pci_start..4G as UC; everything else uses the default type, WB.
    let pci_len = (1 << 32) - pci_start;
    let vecs = get_mtrr_pairs(pci_start, pci_len);

    let phys_mask: u64 = (1 << vm.get_guest_phys_addr_bits()) - 1;
    for (idx, (base, len)) in vecs.iter().enumerate() {
        let reg_idx = idx as u32 * 2;
        msrs.insert(MTRR_PHYS_BASE_MSR + reg_idx, base | MTRR_MEMTYPE_UC as u64);
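        // The PhysMask value selects the range size: for a power-of-two `len`,
        // -len (two's complement) sets every bit above the length. For
        // example, len = 1 GiB with 36 physical address bits gives
        // 0xF_C000_0000 before the valid bit is OR-ed in.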
        let mask: u64 = len.wrapping_neg() & phys_mask | MTRR_VAR_VALID;
        msrs.insert(MTRR_PHYS_MASK_MSR + reg_idx, mask);
    }
    // Enable variable MTRRs (fixed MTRRs stay disabled) and set the default
    // memory type to WB.
    msrs.insert(
        crate::msr_index::MSR_MTRRdefType,
        MTRR_ENABLE | MTRR_MEMTYPE_WB as u64,
    );
}

/// Inserts the default values of MSRs at reset into `msrs`.
///
/// Sets IA32_TSC to 0 and enables fast string operations via
/// IA32_MISC_ENABLE.
pub fn set_default_msrs(msrs: &mut BTreeMap<u32, u64>) {
    msrs.insert(crate::msr_index::MSR_IA32_TSC, 0x0);
    msrs.insert(
        crate::msr_index::MSR_IA32_MISC_ENABLE,
        crate::msr_index::MSR_IA32_MISC_ENABLE_FAST_STRING as u64,
    );
}

/// Inserts model-specific registers (MSRs) configured for long (64-bit) mode
/// into `msrs`.
pub fn set_long_mode_msrs(msrs: &mut BTreeMap<u32, u64>) {
    msrs.insert(crate::msr_index::MSR_IA32_SYSENTER_CS, 0x0);
    msrs.insert(crate::msr_index::MSR_IA32_SYSENTER_ESP, 0x0);
    msrs.insert(crate::msr_index::MSR_IA32_SYSENTER_EIP, 0x0);

    // x86_64-specific MSRs; we only run on x86_64, not x86.
    msrs.insert(crate::msr_index::MSR_STAR, 0x0);
    msrs.insert(crate::msr_index::MSR_CSTAR, 0x0);
    msrs.insert(crate::msr_index::MSR_KERNEL_GS_BASE, 0x0);
    msrs.insert(crate::msr_index::MSR_SYSCALL_MASK, 0x0);
    msrs.insert(crate::msr_index::MSR_LSTAR, 0x0);
    // end of x86_64 specific code

    msrs.insert(crate::msr_index::MSR_IA32_TSC, 0x0);
    msrs.insert(
        crate::msr_index::MSR_IA32_MISC_ENABLE,
        crate::msr_index::MSR_IA32_MISC_ENABLE_FAST_STRING as u64,
    );
}

const X86_CR0_PE: u64 = 0x1;
const X86_CR0_PG: u64 = 0x80000000;
const X86_CR4_PAE: u64 = 0x20;

const EFER_LME: u64 = 0x100;
const EFER_LMA: u64 = 0x400;

const BOOT_GDT_OFFSET: u64 = 0x1500;
const BOOT_IDT_OFFSET: u64 = 0x1528;

fn write_gdt_table(table: &[u64], guest_mem: &GuestMemory) -> Result<()> {
    let boot_gdt_addr = GuestAddress(BOOT_GDT_OFFSET);
    for (index, entry) in table.iter().enumerate() {
        let addr = boot_gdt_addr
            .checked_add((index * mem::size_of::<u64>()) as u64)
            .ok_or(Error::WriteGDTFailure)?;
        if !guest_mem.is_valid_range(addr, mem::size_of::<u64>() as u64) {
            return Err(Error::WriteGDTFailure);
        }

        guest_mem
            .write_obj_at_addr(*entry, addr)
            .map_err(|_| Error::WriteGDTFailure)?;
    }
    Ok(())
}

fn write_idt_value(val: u64, guest_mem: &GuestMemory) -> Result<()> {
    let boot_idt_addr = GuestAddress(BOOT_IDT_OFFSET);
    guest_mem
        .write_obj_at_addr(val, boot_idt_addr)
        .map_err(|_| Error::WriteIDTFailure)
}

/// Configures the GDT, IDT, and segment registers for long mode.
pub fn configure_segments_and_sregs(mem: &GuestMemory, sregs: &mut Sregs) -> Result<()> {
    // reference: https://docs.kernel.org/arch/x86/boot.html?highlight=__BOOT_CS#id1
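    // Flag decoding (standard GDT descriptor bits, illustrative): 0xa09b is a
    // present ring-0 code segment with the L (64-bit) bit set, 0xc093 is a
    // present ring-0 read/write data segment with the D/B (32-bit) bit set,
    // and 0x808b is a TSS descriptor (system type 0xB); all three use 4 KiB
    // granularity with limit 0xfffff.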
    let gdt_table: [u64; 6] = [
        gdt::gdt_entry(0, 0, 0),            // NULL
        gdt::gdt_entry(0, 0, 0),            // NULL
        gdt::gdt_entry(0xa09b, 0, 0xfffff), // CODE
        gdt::gdt_entry(0xc093, 0, 0xfffff), // DATA
        gdt::gdt_entry(0x808b, 0, 0xfffff), // TSS
        0,                                  // TSS (upper 32 bits of base)
    ];

    let code_seg = gdt::segment_from_gdt(gdt_table[2], 2);
    let data_seg = gdt::segment_from_gdt(gdt_table[3], 3);
    let tss_seg = gdt::segment_from_gdt(gdt_table[4], 4);

    // Write segments
    write_gdt_table(&gdt_table[..], mem)?;
    sregs.gdt.base = BOOT_GDT_OFFSET;
    sregs.gdt.limit = mem::size_of_val(&gdt_table) as u16 - 1;

    write_idt_value(0, mem)?;
    sregs.idt.base = BOOT_IDT_OFFSET;
    sregs.idt.limit = mem::size_of::<u64>() as u16 - 1;

    sregs.cs = code_seg;
    sregs.ds = data_seg;
    sregs.es = data_seg;
    sregs.fs = data_seg;
    sregs.gs = data_seg;
    sregs.ss = data_seg;
    sregs.tr = tss_seg;

    /* 64-bit protected mode */
    sregs.cr0 |= X86_CR0_PE;
    sregs.efer |= EFER_LME;

    Ok(())
}

/// Configures the GDT, IDT, and segment registers for 32-bit protected mode with paging disabled.
pub fn configure_segments_and_sregs_flat32(mem: &GuestMemory, sregs: &mut Sregs) -> Result<()> {
    // reference: https://docs.kernel.org/arch/x86/boot.html?highlight=__BOOT_CS#id1
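    // Same layout as the long-mode table above, except the code descriptor is
    // 0xc09b: the D/B (32-bit default operand size) bit replaces the L bit,
    // so CS describes a flat 32-bit code segment.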
    let gdt_table: [u64; 5] = [
        gdt::gdt_entry(0, 0, 0),            // NULL
        gdt::gdt_entry(0, 0, 0),            // NULL
        gdt::gdt_entry(0xc09b, 0, 0xfffff), // CODE
        gdt::gdt_entry(0xc093, 0, 0xfffff), // DATA
        gdt::gdt_entry(0x808b, 0, 0xfffff), // TSS
    ];

    let code_seg = gdt::segment_from_gdt(gdt_table[2], 2);
    let data_seg = gdt::segment_from_gdt(gdt_table[3], 3);
    let tss_seg = gdt::segment_from_gdt(gdt_table[4], 4);

    // Write segments
    write_gdt_table(&gdt_table[..], mem)?;
    sregs.gdt.base = BOOT_GDT_OFFSET;
    sregs.gdt.limit = mem::size_of_val(&gdt_table) as u16 - 1;

    write_idt_value(0, mem)?;
    sregs.idt.base = BOOT_IDT_OFFSET;
    sregs.idt.limit = mem::size_of::<u64>() as u16 - 1;

    sregs.cs = code_seg;
    sregs.ds = data_seg;
    sregs.es = data_seg;
    sregs.fs = data_seg;
    sregs.gs = data_seg;
    sregs.ss = data_seg;
    sregs.tr = tss_seg;

    /* 32-bit protected mode with paging disabled */
    sregs.cr0 |= X86_CR0_PE;
    sregs.cr0 &= !X86_CR0_PG;

    Ok(())
}

/// Configures the system page tables and control registers for long mode with paging.
/// Prepares an identity mapping for the low 4 GiB of memory.
pub fn setup_page_tables(mem: &GuestMemory, sregs: &mut Sregs) -> Result<()> {
    // Put the PML4 right after the zero page, aligned to 4 KiB.
    let boot_pml4_addr = GuestAddress(0x9000);
    let boot_pdpte_addr = GuestAddress(0xa000);
    let boot_pde_addr = GuestAddress(0xb000);

    const PDE_FLAGS_TABLE_REFERENCE: u64 = 0x03; // Present | Read/Write
    const PDE_FLAGS_PAGE_MAPPING: u64 = 0x83; // Present | Read/Write | Page Size

    // Entry covering VA [0..512GB)
    mem.write_obj_at_addr(
        boot_pdpte_addr.offset() | PDE_FLAGS_TABLE_REFERENCE,
        boot_pml4_addr,
    )
    .map_err(|_| Error::WritePML4Address)?;

    // Identity mapping for VA [0..4GB)
    for i in 0..4 {
        let pde_addr = boot_pde_addr.unchecked_add(i * 0x1000);

        // Entry covering a single 1GB VA area
        mem.write_obj_at_addr(
            pde_addr.offset() | PDE_FLAGS_TABLE_REFERENCE,
            boot_pdpte_addr.unchecked_add(i * 8),
        )
        .map_err(|_| Error::WritePDPTEAddress)?;

        // 512 2 MiB entries together covering a single 1 GiB VA area. Note we
        // are assuming the CPU supports 2 MiB pages ('pse' in /proc/cpuinfo),
        // as all modern CPUs do.
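        // For example, the entry for i = 1, j = 3 is
        // (1 << 30) | (3 << 21) | 0x83 = 0x4060_0083, which identity-maps the
        // 2 MiB region starting at guest physical address 0x4060_0000.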
        for j in 0..512 {
            mem.write_obj_at_addr(
                (i << 30) | (j << 21) | PDE_FLAGS_PAGE_MAPPING,
                pde_addr.unchecked_add(j * 8),
            )
            .map_err(|_| Error::WritePDEAddress)?;
        }
    }

    sregs.cr3 = boot_pml4_addr.offset();
    sregs.cr4 |= X86_CR4_PAE;
    sregs.cr0 |= X86_CR0_PG;
    // Long mode is active; the CPU would set this bit automatically once
    // paging (CR0.PG) is enabled with EFER.LME set, so it must accompany
    // CR0_PG here.
    sregs.efer |= EFER_LMA;
    Ok(())
}

#[cfg(test)]
mod tests {
    use vm_memory::GuestAddress;
    use vm_memory::GuestMemory;

    use super::*;

    fn create_guest_mem() -> GuestMemory {
        GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap()
    }

    fn read_u64(gm: &GuestMemory, offset: u64) -> u64 {
        let read_addr = GuestAddress(offset);
        gm.read_obj_from_addr(read_addr).unwrap()
    }

    #[test]
    fn segments_and_sregs() {
        let mut sregs = Default::default();
        let gm = create_guest_mem();
        configure_segments_and_sregs(&gm, &mut sregs).unwrap();

        assert_eq!(0x0, read_u64(&gm, BOOT_GDT_OFFSET));
        assert_eq!(0xaf9b000000ffff, read_u64(&gm, BOOT_GDT_OFFSET + 0x10));
        assert_eq!(0xcf93000000ffff, read_u64(&gm, BOOT_GDT_OFFSET + 0x18));
        assert_eq!(0x8f8b000000ffff, read_u64(&gm, BOOT_GDT_OFFSET + 0x20));
        assert_eq!(0x0, read_u64(&gm, BOOT_IDT_OFFSET));

        assert_eq!(0, sregs.cs.base);
        assert_eq!(0xffffffff, sregs.ds.limit_bytes);
        assert_eq!(0x10, sregs.cs.selector);
        assert_eq!(0x18, sregs.ds.selector);
        assert_eq!(0x18, sregs.es.selector);
        assert_eq!(0x18, sregs.ss.selector);
        assert_eq!(1, sregs.fs.present);
        assert_eq!(1, sregs.gs.g);
        assert_eq!(0, sregs.ss.avl);
        assert_eq!(0, sregs.tr.base);
        assert_eq!(0xffffffff, sregs.tr.limit_bytes);
        assert_eq!(0, sregs.tr.avl);
        assert_eq!(X86_CR0_PE, sregs.cr0 & X86_CR0_PE);
        assert_eq!(EFER_LME, sregs.efer);
    }

    #[test]
    fn page_tables() {
        let mut sregs = Default::default();
        let gm = create_guest_mem();
        setup_page_tables(&gm, &mut sregs).unwrap();

        assert_eq!(0xa003, read_u64(&gm, 0x9000));
        assert_eq!(0xb003, read_u64(&gm, 0xa000));
        for i in 0..512 {
            assert_eq!((i << 21) + 0x83u64, read_u64(&gm, 0xb000 + i * 8));
        }

        assert_eq!(0x9000, sregs.cr3);
        assert_eq!(X86_CR4_PAE, sregs.cr4);
        assert_eq!(X86_CR0_PG, sregs.cr0 & X86_CR0_PG);
    }
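
    // Illustrative checks (a minimal sketch) of the MTRR helpers above; the
    // expected values follow directly from the MSR index constants and the
    // power-of-two decomposition rules.
    #[test]
    fn mtrr_helpers() {
        // Variable MTRR base/mask MSRs and MTRRdefType are MTRR MSRs; an
        // unrelated MSR is not.
        assert!(is_mtrr_msr(MTRR_PHYS_BASE_MSR));
        assert!(is_mtrr_msr(MTRR_PHYS_MASK_MSR));
        assert!(is_mtrr_msr(crate::msr_index::MSR_MTRRdefType));
        assert!(!is_mtrr_msr(crate::msr_index::MSR_IA32_TSC));

        // Two base/mask pairs plus MTRRdefType count as two variable MTRRs.
        let mut msrs = std::collections::BTreeMap::new();
        msrs.insert(MTRR_PHYS_BASE_MSR, 0);
        msrs.insert(MTRR_PHYS_MASK_MSR, 0);
        msrs.insert(MTRR_PHYS_BASE_MSR + 2, 0);
        msrs.insert(MTRR_PHYS_MASK_MSR + 2, 0);
        msrs.insert(crate::msr_index::MSR_MTRRdefType, 0);
        assert_eq!(count_variable_mtrrs(&msrs), 2);

        // A misaligned range splits into power-of-two chunks, each aligned to
        // its own length.
        assert_eq!(
            get_mtrr_pairs(0x4000_0000, 0xC000_0000),
            vec![(0x4000_0000, 0x4000_0000), (0x8000_0000, 0x8000_0000)]
        );
    }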
}