1use std::collections::btree_map;
8use std::collections::BTreeMap;
9
10use base::pagesize;
11
12use crate::address_allocator::AddressAllocator;
13use crate::address_allocator::AddressAllocatorSet;
14use crate::AddressRange;
15use crate::Alloc;
16use crate::Error;
17use crate::PciAddress;
18use crate::Result;
19
/// Selects which MMIO address pool an allocation is served from.
///
/// The discriminant doubles as the index into
/// `SystemAllocator::mmio_address_spaces`, so the variant order
/// (`Low` = 0, `High` = 1) must not change.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum MmioType {
    Low,
    High,
}
28
/// Constraints applied to a single MMIO allocation request; consumed by
/// `SystemAllocator::allocate_mmio`.
#[derive(Copy, Clone, Debug)]
pub struct AllocOptions {
    prefetchable: bool,
    max_address: u64,
    alignment: Option<u64>,
    top_down: bool,
}

impl Default for AllocOptions {
    fn default() -> Self {
        Self::new()
    }
}

impl AllocOptions {
    /// Returns the default request: non-prefetchable, no address cap, no
    /// extra alignment requirement, bottom-up placement.
    pub fn new() -> Self {
        Self {
            prefetchable: false,
            max_address: u64::MAX,
            alignment: None,
            top_down: false,
        }
    }

    /// Sets whether the allocation may be treated as prefetchable.
    pub fn prefetchable(&mut self, prefetchable: bool) -> &mut Self {
        self.prefetchable = prefetchable;
        self
    }

    /// Requires every address in the allocation to be at most `max_address`.
    pub fn max_address(&mut self, max_address: u64) -> &mut Self {
        self.max_address = max_address;
        self
    }

    /// Requests that the allocation be placed from the top of the pool
    /// downward rather than bottom-up.
    pub fn top_down(&mut self, top_down: bool) -> &mut Self {
        self.top_down = top_down;
        self
    }

    /// Requires the allocation's start address to be a multiple of `alignment`.
    pub fn align(&mut self, alignment: u64) -> &mut Self {
        self.alignment = Some(alignment);
        self
    }
}
86
/// Address ranges and IRQ configuration used to build a `SystemAllocator`.
pub struct SystemAllocatorConfig {
    /// I/O port address space, if the platform has one. Must not extend past
    /// 0xffff (enforced in `SystemAllocator::new`).
    pub io: Option<AddressRange>,
    /// MMIO pool used for allocations that must satisfy a `max_address` cap or
    /// that are non-prefetchable (see `SystemAllocator::allocate_mmio`).
    pub low_mmio: AddressRange,
    /// MMIO pool preferred for unconstrained prefetchable allocations. A
    /// reserved region, if requested, is carved off the start of this range.
    pub high_mmio: AddressRange,
    /// Optional platform-specific MMIO region, managed separately from the
    /// low/high pools.
    pub platform_mmio: Option<AddressRange>,
    /// First IRQ number handed out by `allocate_irq`; IRQs run from here up
    /// to 1023 inclusive.
    pub first_irq: u32,
}
107
/// Tracks allocation of system-level resources: I/O ports, MMIO ranges,
/// PCI device/function slots, IRQs, and GPE numbers.
#[derive(Debug)]
pub struct SystemAllocator {
    /// Allocator for the I/O port space, present only if one was configured.
    io_address_space: Option<AddressAllocator>,

    /// MMIO allocators indexed by `MmioType as usize` (`Low` = 0, `High` = 1).
    mmio_address_spaces: [AddressAllocator; 2],
    /// Allocator for the optional platform MMIO region.
    mmio_platform_address_spaces: Option<AddressAllocator>,

    /// Range carved off the start of high MMIO at construction, if requested;
    /// never handed out by the MMIO allocators.
    reserved_region: Option<AddressRange>,

    /// Per-bus PCI devfn allocators, created lazily on first use of a bus.
    pci_allocator: BTreeMap<u8, AddressAllocator>,
    /// IRQ numbers, from the configured `first_irq` through 1023.
    irq_allocator: AddressAllocator,
    /// ACPI GPE numbers 0 through 255.
    gpe_allocator: AddressAllocator,
    /// Monotonic counter backing `get_anon_alloc`; ids start at 1.
    next_anon_id: usize,
}
124
impl SystemAllocator {
    /// Creates a new `SystemAllocator` from `config`.
    ///
    /// * `reserve_region_size` - if set, carves this many bytes off the start
    ///   of `config.high_mmio`; the carved range is reported by
    ///   `reserved_region()` and excluded from the high MMIO pool.
    /// * `mmio_address_ranges` - if non-empty, the low and high MMIO pools are
    ///   restricted to their intersection with these ranges.
    ///
    /// # Errors
    ///
    /// * `Error::OutOfBounds` if `config.high_mmio` has no valid length.
    /// * `Error::OutOfSpace` if `reserve_region_size` exceeds the high MMIO
    ///   region's length.
    /// * `Error::IOPortOutOfRange` if `config.io` extends past 0xffff.
    pub fn new(
        config: SystemAllocatorConfig,
        reserve_region_size: Option<u64>,
        mmio_address_ranges: &[AddressRange],
    ) -> Result<Self> {
        let page_size = pagesize() as u64;

        // Split the reserved region (if any) off the front of high MMIO.
        let (high_mmio, reserved_region) = match reserve_region_size {
            Some(reserved_len) => {
                let high_mmio_len = config.high_mmio.len().ok_or(Error::OutOfBounds)?;
                if reserved_len > high_mmio_len {
                    return Err(Error::OutOfSpace);
                }
                let reserved_start = config.high_mmio.start;
                // NOTE(review): a zero `reserved_len` would underflow here;
                // callers presumably always pass a non-zero size — confirm.
                let reserved_end = reserved_start + reserved_len - 1;
                let high_mmio_start = reserved_end + 1;
                let high_mmio_end = config.high_mmio.end;
                (
                    AddressRange {
                        start: high_mmio_start,
                        end: high_mmio_end,
                    },
                    Some(AddressRange {
                        start: reserved_start,
                        end: reserved_end,
                    }),
                )
            }
            None => (config.high_mmio, None),
        };

        // With no restriction list, a pool is just the source range itself;
        // otherwise it is the per-range intersection with the source range.
        let intersect_mmio_range = |src_range: AddressRange| -> Result<Vec<AddressRange>> {
            Ok(if mmio_address_ranges.is_empty() {
                vec![src_range]
            } else {
                mmio_address_ranges
                    .iter()
                    .map(|r| r.intersect(src_range))
                    .collect()
            })
        };

        Ok(SystemAllocator {
            io_address_space: if let Some(io) = config.io {
                // I/O port numbers are 16-bit.
                if io.end > 0xffff {
                    return Err(Error::IOPortOutOfRange(io));
                }
                // I/O allocations are aligned to 0x400 ports.
                Some(AddressAllocator::new(io, Some(0x400), None)?)
            } else {
                None
            },
            // Index order must match MmioType: Low = 0, High = 1.
            mmio_address_spaces: [
                AddressAllocator::new_from_list(
                    intersect_mmio_range(config.low_mmio)?,
                    Some(page_size),
                    None,
                )?,
                AddressAllocator::new_from_list(
                    intersect_mmio_range(high_mmio)?,
                    Some(page_size),
                    None,
                )?,
            ],

            // Per-bus allocators are created lazily in get_pci_allocator_mut.
            pci_allocator: BTreeMap::new(),

            mmio_platform_address_spaces: if let Some(platform) = config.platform_mmio {
                Some(AddressAllocator::new(platform, Some(page_size), None)?)
            } else {
                None
            },

            reserved_region,

            // IRQ numbers run from the configured first IRQ through 1023.
            irq_allocator: AddressAllocator::new(
                AddressRange {
                    start: config.first_irq as u64,
                    end: 1023,
                },
                Some(1),
                None,
            )?,

            // GPE numbers 0 through 255.
            gpe_allocator: AddressAllocator::new(
                AddressRange { start: 0, end: 255 },
                Some(1),
                None,
            )?,
            next_anon_id: 0,
        })
    }

    /// Allocates the next free IRQ number, or `None` if all are taken.
    pub fn allocate_irq(&mut self) -> Option<u32> {
        let id = self.get_anon_alloc();
        self.irq_allocator
            .allocate(1, id, "irq-auto".to_string())
            .map(|v| v as u32)
            .ok()
    }

    /// Releases `irq`, making it available again. A non-allocated `irq` is
    /// silently ignored.
    pub fn release_irq(&mut self, irq: u32) {
        let _ = self.irq_allocator.release_containing(irq.into());
    }

    /// Reserves the specific IRQ number `irq`. Returns `true` on success,
    /// `false` if it was already taken or out of range.
    pub fn reserve_irq(&mut self, irq: u32) -> bool {
        let id = self.get_anon_alloc();
        self.irq_allocator
            .allocate_at(
                AddressRange {
                    start: irq.into(),
                    end: irq.into(),
                },
                id,
                "irq-fixed".to_string(),
            )
            .is_ok()
    }

    /// Allocates the next free GPE number, or `None` if all are taken.
    pub fn allocate_gpe(&mut self) -> Option<u32> {
        let id = self.get_anon_alloc();
        self.gpe_allocator
            .allocate(1, id, "gpe-auto".to_string())
            .map(|v| v as u32)
            .ok()
    }

    /// Returns the devfn allocator for `bus`, creating it on first use.
    /// Returns `None` only if the allocator cannot be constructed.
    fn get_pci_allocator_mut(&mut self, bus: u8) -> Option<&mut AddressAllocator> {
        match self.pci_allocator.entry(bus) {
            btree_map::Entry::Occupied(entry) => Some(entry.into_mut()),
            btree_map::Entry::Vacant(entry) => {
                // On bus 0, start at devfn 8 (device 1, function 0) so that
                // 00:00.* is never handed out — presumably reserved for the
                // PCI root device; confirm against callers.
                let base = if bus == 0 { 8 } else { 0 };

                // One slot per (device, function): 32 devices x 8 functions,
                // with devfn encoded as (dev << 3) | func. The third argument
                // (Some(8)) appears to keep allocations device-aligned —
                // NOTE(review): confirm its exact semantics in AddressAllocator.
                let pci_alloc = AddressAllocator::new(
                    AddressRange {
                        start: base,
                        end: (32 * 8) - 1,
                    },
                    Some(1),
                    Some(8),
                )
                .ok()?;

                Some(entry.insert(pci_alloc))
            }
        }
    }

    /// Returns `true` if no PCI slot has ever been allocated or reserved on
    /// `bus` (i.e. its lazy allocator was never created).
    pub fn pci_bus_empty(&self, bus: u8) -> bool {
        !self.pci_allocator.contains_key(&bus)
    }

    /// Allocates the next free PCI devfn on `bus`, tagged with `tag`.
    /// Returns `None` if the bus is full or its allocator can't be created.
    pub fn allocate_pci(&mut self, bus: u8, tag: String) -> Option<PciAddress> {
        let id = self.get_anon_alloc();
        let allocator = self.get_pci_allocator_mut(bus)?;
        allocator
            .allocate(1, id, tag)
            // Decode the devfn slot number back into device and function.
            .map(|v| PciAddress {
                bus,
                dev: (v >> 3) as u8,
                func: (v & 7) as u8,
            })
            .ok()
    }

    /// Reserves the specific PCI address `pci_addr`. Returns `true` on
    /// success, `false` if it was already taken or unavailable.
    pub fn reserve_pci(&mut self, pci_addr: PciAddress, tag: String) -> bool {
        let id = self.get_anon_alloc();

        let allocator = match self.get_pci_allocator_mut(pci_addr.bus) {
            Some(v) => v,
            None => return false,
        };
        // Encode (dev, func) as the devfn slot number.
        let df = ((pci_addr.dev as u64) << 3) | (pci_addr.func as u64);
        allocator
            .allocate_at(AddressRange { start: df, end: df }, id, tag)
            .is_ok()
    }

    /// Releases the PCI address `pci_addr`. Returns `true` if it had been
    /// allocated, `false` otherwise.
    pub fn release_pci(&mut self, pci_addr: PciAddress) -> bool {
        let allocator = match self.get_pci_allocator_mut(pci_addr.bus) {
            Some(v) => v,
            None => return false,
        };
        let df = ((pci_addr.dev as u64) << 3) | (pci_addr.func as u64);
        allocator.release_containing(df).is_ok()
    }

    /// Allocates `size` bytes of MMIO space, recorded under `alloc`/`tag`,
    /// subject to the constraints in `opts`. Returns the start address.
    ///
    /// The high pool is tried only for prefetchable requests with no
    /// `max_address` cap; everything else goes to the low pool. A high-pool
    /// `OutOfSpace` failure is retried in the low pool (the reverse is never
    /// attempted, since low-pool constraints may not hold in high MMIO).
    pub fn allocate_mmio(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        opts: &AllocOptions,
    ) -> Result<u64> {
        // Caps tighter than the full 32-bit space cannot be honored by the
        // allocators, so reject them outright.
        if opts.max_address < u32::MAX as u64 {
            return Err(Error::OutOfSpace);
        }

        let mut mmio_type = MmioType::High;
        if opts.max_address < u64::MAX || !opts.prefetchable {
            mmio_type = MmioType::Low;
        }

        let res = self.allocate_mmio_internal(size, alloc, tag.clone(), opts, mmio_type);
        // Fall back from high to low when the high pool is exhausted.
        if mmio_type == MmioType::High && matches!(res, Err(Error::OutOfSpace)) {
            self.allocate_mmio_internal(size, alloc, tag, opts, MmioType::Low)
        } else {
            res
        }
    }

    /// Performs the actual allocation in the pool selected by `mmio_type`,
    /// dispatching on the alignment/direction combination from `opts`.
    fn allocate_mmio_internal(
        &mut self,
        size: u64,
        alloc: Alloc,
        tag: String,
        opts: &AllocOptions,
        mmio_type: MmioType,
    ) -> Result<u64> {
        let allocator = &mut self.mmio_address_spaces[mmio_type as usize];
        match (opts.alignment, opts.top_down) {
            (Some(align), true) => allocator.reverse_allocate_with_align(size, alloc, tag, align),
            (Some(align), false) => allocator.allocate_with_align(size, alloc, tag, align),
            (None, true) => allocator.reverse_allocate(size, alloc, tag),
            (None, false) => allocator.allocate(size, alloc, tag),
        }
    }

    /// Reserves `range` so later MMIO allocations cannot overlap it: every
    /// pool that intersects `range` gets an anonymous allocation covering
    /// the overlap. Fails if any overlap is already allocated.
    pub fn reserve_mmio(&mut self, range: AddressRange) -> Result<()> {
        // Snapshot and sort the pools so iteration can stop once a pool
        // starts past the end of the requested range.
        let mut pools = Vec::new();
        for pool in self.mmio_pools() {
            pools.push(*pool);
        }
        pools.sort_by(|a, b| a.start.cmp(&b.start));
        for pool in &pools {
            if pool.start > range.end {
                break;
            }

            let overlap = pool.intersect(range);
            if !overlap.is_empty() {
                let id = self.get_anon_alloc();
                self.mmio_allocator_any().allocate_at(
                    overlap,
                    id,
                    "pci mmio reserve".to_string(),
                )?;
            }
        }

        Ok(())
    }

    /// Returns the platform MMIO allocator, if one was configured.
    pub fn mmio_platform_allocator(&mut self) -> Option<&mut AddressAllocator> {
        self.mmio_platform_address_spaces.as_mut()
    }

    /// Returns the I/O port allocator, if one was configured.
    pub fn io_allocator(&mut self) -> Option<&mut AddressAllocator> {
        self.io_address_space.as_mut()
    }

    /// Returns the MMIO allocator for the given pool.
    pub fn mmio_allocator(&mut self, mmio_type: MmioType) -> &mut AddressAllocator {
        &mut self.mmio_address_spaces[mmio_type as usize]
    }

    /// Returns a combined view over both MMIO pools.
    pub fn mmio_allocator_any(&mut self) -> AddressAllocatorSet {
        AddressAllocatorSet::new(&mut self.mmio_address_spaces)
    }

    /// Returns the address ranges backing all MMIO pools (low and high).
    pub fn mmio_pools(&self) -> Vec<&AddressRange> {
        self.mmio_address_spaces
            .iter()
            .flat_map(|mmio_as| mmio_as.pools())
            .collect()
    }

    /// Returns the region carved off high MMIO at construction, if any.
    pub fn reserved_region(&self) -> Option<AddressRange> {
        self.reserved_region
    }

    /// Returns a fresh anonymous allocation tag, unique for the lifetime of
    /// this allocator (ids start at 1 and increase monotonically).
    pub fn get_anon_alloc(&mut self) -> Alloc {
        self.next_anon_id += 1;
        Alloc::Anon(self.next_anon_id)
    }
}
460
#[cfg(test)]
mod tests {
    use super::*;

    /// Smoke test exercising IRQ, GPE, MMIO allocate/release, and
    /// reserve_mmio against a small hand-built configuration.
    #[test]
    fn example() {
        let mut a = SystemAllocator::new(
            SystemAllocatorConfig {
                io: Some(AddressRange {
                    start: 0x1000,
                    end: 0xffff,
                }),
                low_mmio: AddressRange {
                    start: 0x3000_0000,
                    end: 0x3000_ffff,
                },
                high_mmio: AddressRange {
                    start: 0x1000_0000,
                    end: 0x1fffffff,
                },
                platform_mmio: None,
                first_irq: 5,
            },
            None,
            &[],
        )
        .unwrap();

        // IRQs are handed out sequentially starting at first_irq; GPEs at 0.
        assert_eq!(a.allocate_irq(), Some(5));
        assert_eq!(a.allocate_irq(), Some(6));
        assert_eq!(a.allocate_gpe(), Some(0));
        assert_eq!(a.allocate_gpe(), Some(1));
        // First high-MMIO allocation lands at the start of the high range.
        assert_eq!(
            a.mmio_allocator(MmioType::High).allocate(
                0x100,
                Alloc::PciBar {
                    bus: 0,
                    dev: 0,
                    func: 0,
                    bar: 0
                },
                "bar0".to_string()
            ),
            Ok(0x10000000)
        );
        // The allocation can be looked up again by its Alloc key.
        assert_eq!(
            a.mmio_allocator(MmioType::High).get(&Alloc::PciBar {
                bus: 0,
                dev: 0,
                func: 0,
                bar: 0
            }),
            Some(&(
                AddressRange {
                    start: 0x10000000,
                    end: 0x100000ff
                },
                "bar0".to_string()
            ))
        );

        // A fixed-address allocation in low MMIO can be made and released.
        let id = a.get_anon_alloc();
        assert_eq!(
            a.mmio_allocator(MmioType::Low).allocate_at(
                AddressRange {
                    start: 0x3000_5000,
                    end: 0x30009fff
                },
                id,
                "Test".to_string()
            ),
            Ok(())
        );
        assert_eq!(
            a.mmio_allocator(MmioType::Low).release(id),
            Ok(AddressRange {
                start: 0x3000_5000,
                end: 0x30009fff
            })
        );
        // Reserving a range blocks later overlapping fixed allocations.
        assert_eq!(
            a.reserve_mmio(AddressRange {
                start: 0x3000_2000,
                end: 0x30005fff
            }),
            Ok(())
        );
        assert_eq!(
            a.mmio_allocator(MmioType::Low)
                .allocate_at(
                    AddressRange {
                        start: 0x3000_5000,
                        end: 0x3000_9fff
                    },
                    id,
                    "Test".to_string()
                )
                .is_err(),
            true
        );
    }
}