@@ -279,7 +279,7 @@ pub unsafe fn kernel_map_mmio(
279
279
// omitted
280
280
281
281
let virt_region =
282
- alloc :: kernel_mmio_va_allocator (). lock (| allocator | allocator . alloc (num_pages ))? ;
282
+ page_alloc :: kernel_mmio_va_allocator (). lock (| allocator | allocator . alloc (num_pages ))? ;
283
283
284
284
kernel_map_at_unchecked (
285
285
name ,
@@ -296,10 +296,11 @@ pub unsafe fn kernel_map_mmio(
296
296
}
297
297
```
298
298
299
- This allocator is defined and implemented in the added file ` src/memory/mmu/alloc.rs ` . Like other
300
- parts of the mapping code, its implementation makes use of the newly introduced ` PageAddress<ATYPE> `
301
- and ` MemoryRegion<ATYPE> ` types (in [ ` src/memory/mmu/types.rs ` ] ( kernel/src/memory/mmu/types.rs ) ),
302
- but apart from that is rather straight forward. Therefore, it won't be covered in details here.
299
+ This allocator is defined and implemented in the added file ` src/memory/mmu/page_alloc.rs ` . Like
300
+ other parts of the mapping code, its implementation makes use of the newly introduced
301
+ ` PageAddress<ATYPE> ` and ` MemoryRegion<ATYPE> ` types (in
302
+ [ ` src/memory/mmu/types.rs ` ] ( kernel/src/memory/mmu/types.rs ) ), but apart from that is rather
303
+ straightforward. Therefore, it won't be covered in detail here.
303
304
304
305
The more interesting question is: How does the allocator get to learn which VAs it can use?
305
306
@@ -313,7 +314,7 @@ been turned on.
313
314
fn kernel_init_mmio_va_allocator () {
314
315
let region = bsp :: memory :: mmu :: virt_mmio_remap_region ();
315
316
316
- alloc :: kernel_mmio_va_allocator (). lock (| allocator | allocator . initialize (region ));
317
+ page_alloc :: kernel_mmio_va_allocator (). lock (| allocator | allocator . initialize (region ));
317
318
}
318
319
```
319
320
@@ -2227,81 +2228,6 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/main.rs 14_virtual_mem_
2227
2228
let (_, privilege_level) = exception::current_privilege_level();
2228
2229
info!("Current privilege level: {}", privilege_level);
2229
2230
2230
- diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/alloc.rs 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/alloc.rs
2231
- --- 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/alloc.rs
2232
- +++ 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/alloc.rs
2233
- @@ -0,0 +1,70 @@
2234
- + // SPDX-License-Identifier: MIT OR Apache-2.0
2235
- + //
2236
- + // Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
2237
- +
2238
- + //! Allocation.
2239
- +
2240
- + use super::MemoryRegion;
2241
- + use crate::{
2242
- + memory::{AddressType, Virtual},
2243
- + synchronization::IRQSafeNullLock,
2244
- + warn,
2245
- + };
2246
- + use core::num::NonZeroUsize;
2247
- +
2248
- + //--------------------------------------------------------------------------------------------------
2249
- + // Public Definitions
2250
- + //--------------------------------------------------------------------------------------------------
2251
- +
2252
- + /// A page allocator that can be lazyily initialized.
2253
- + pub struct PageAllocator<ATYPE: AddressType> {
2254
- + pool: Option<MemoryRegion<ATYPE>>,
2255
- + }
2256
- +
2257
- + //--------------------------------------------------------------------------------------------------
2258
- + // Global instances
2259
- + //--------------------------------------------------------------------------------------------------
2260
- +
2261
- + static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock<PageAllocator<Virtual>> =
2262
- + IRQSafeNullLock::new(PageAllocator::new());
2263
- +
2264
- + //--------------------------------------------------------------------------------------------------
2265
- + // Public Code
2266
- + //--------------------------------------------------------------------------------------------------
2267
- +
2268
- + /// Return a reference to the kernel's MMIO virtual address allocator.
2269
- + pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock<PageAllocator<Virtual>> {
2270
- + &KERNEL_MMIO_VA_ALLOCATOR
2271
- + }
2272
- +
2273
- + impl<ATYPE: AddressType> PageAllocator<ATYPE> {
2274
- + /// Create an instance.
2275
- + pub const fn new() -> Self {
2276
- + Self { pool: None }
2277
- + }
2278
- +
2279
- + /// Initialize the allocator.
2280
- + pub fn initialize(&mut self, pool: MemoryRegion<ATYPE>) {
2281
- + if self.pool.is_some() {
2282
- + warn!("Already initialized");
2283
- + return;
2284
- + }
2285
- +
2286
- + self.pool = Some(pool);
2287
- + }
2288
- +
2289
- + /// Allocate a number of pages.
2290
- + pub fn alloc(
2291
- + &mut self,
2292
- + num_requested_pages: NonZeroUsize,
2293
- + ) -> Result<MemoryRegion<ATYPE>, &'static str> {
2294
- + if self.pool.is_none() {
2295
- + return Err("Allocator not initialized");
2296
- + }
2297
- +
2298
- + self.pool
2299
- + .as_mut()
2300
- + .unwrap()
2301
- + .take_first_n_pages(num_requested_pages)
2302
- + }
2303
- + }
2304
-
2305
2231
diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/mapping_record.rs 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/mapping_record.rs
2306
2232
--- 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/mapping_record.rs
2307
2233
+++ 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/mapping_record.rs
@@ -2540,6 +2466,81 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/mapping_reco
2540
2466
+ KERNEL_MAPPING_RECORD.read(|mr| mr.print());
2541
2467
+ }
2542
2468
2469
+ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/page_alloc.rs 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/page_alloc.rs
2470
+ --- 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/page_alloc.rs
2471
+ +++ 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/page_alloc.rs
2472
+ @@ -0,0 +1,70 @@
2473
+ + // SPDX-License-Identifier: MIT OR Apache-2.0
2474
+ + //
2475
+ + // Copyright (c) 2021-2022 Andre Richter <andre.o.richter@gmail.com>
2476
+ +
2477
+ + //! Page allocation.
2478
+ +
2479
+ + use super::MemoryRegion;
2480
+ + use crate::{
2481
+ + memory::{AddressType, Virtual},
2482
+ + synchronization::IRQSafeNullLock,
2483
+ + warn,
2484
+ + };
2485
+ + use core::num::NonZeroUsize;
2486
+ +
2487
+ + //--------------------------------------------------------------------------------------------------
2488
+ + // Public Definitions
2489
+ + //--------------------------------------------------------------------------------------------------
2490
+ +
2491
+ + /// A page allocator that can be lazyily initialized.
2492
+ + pub struct PageAllocator<ATYPE: AddressType> {
2493
+ + pool: Option<MemoryRegion<ATYPE>>,
2494
+ + }
2495
+ +
2496
+ + //--------------------------------------------------------------------------------------------------
2497
+ + // Global instances
2498
+ + //--------------------------------------------------------------------------------------------------
2499
+ +
2500
+ + static KERNEL_MMIO_VA_ALLOCATOR: IRQSafeNullLock<PageAllocator<Virtual>> =
2501
+ + IRQSafeNullLock::new(PageAllocator::new());
2502
+ +
2503
+ + //--------------------------------------------------------------------------------------------------
2504
+ + // Public Code
2505
+ + //--------------------------------------------------------------------------------------------------
2506
+ +
2507
+ + /// Return a reference to the kernel's MMIO virtual address allocator.
2508
+ + pub fn kernel_mmio_va_allocator() -> &'static IRQSafeNullLock<PageAllocator<Virtual>> {
2509
+ + &KERNEL_MMIO_VA_ALLOCATOR
2510
+ + }
2511
+ +
2512
+ + impl<ATYPE: AddressType> PageAllocator<ATYPE> {
2513
+ + /// Create an instance.
2514
+ + pub const fn new() -> Self {
2515
+ + Self { pool: None }
2516
+ + }
2517
+ +
2518
+ + /// Initialize the allocator.
2519
+ + pub fn initialize(&mut self, pool: MemoryRegion<ATYPE>) {
2520
+ + if self.pool.is_some() {
2521
+ + warn!("Already initialized");
2522
+ + return;
2523
+ + }
2524
+ +
2525
+ + self.pool = Some(pool);
2526
+ + }
2527
+ +
2528
+ + /// Allocate a number of pages.
2529
+ + pub fn alloc(
2530
+ + &mut self,
2531
+ + num_requested_pages: NonZeroUsize,
2532
+ + ) -> Result<MemoryRegion<ATYPE>, &'static str> {
2533
+ + if self.pool.is_none() {
2534
+ + return Err("Allocator not initialized");
2535
+ + }
2536
+ +
2537
+ + self.pool
2538
+ + .as_mut()
2539
+ + .unwrap()
2540
+ + .take_first_n_pages(num_requested_pages)
2541
+ + }
2542
+ + }
2543
+
2543
2544
diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/translation_table.rs 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/translation_table.rs
2544
2545
--- 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu/translation_table.rs
2545
2546
+++ 14_virtual_mem_part2_mmio_remap/kernel/src/memory/mmu/translation_table.rs
@@ -3037,8 +3038,8 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu.rs 14_virtua
3037
3038
#[path = "../_arch/aarch64/memory/mmu.rs"]
3038
3039
mod arch_mmu;
3039
3040
3040
- + mod alloc;
3041
3041
+ mod mapping_record;
3042
+ + mod page_alloc;
3042
3043
mod translation_table;
3043
3044
+ mod types;
3044
3045
@@ -3140,7 +3141,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu.rs 14_virtua
3140
3141
+ fn kernel_init_mmio_va_allocator() {
3141
3142
+ let region = bsp::memory::mmu::virt_mmio_remap_region();
3142
3143
+
3143
- + alloc ::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region));
3144
+ + page_alloc ::kernel_mmio_va_allocator().lock(|allocator| allocator.initialize(region));
3144
3145
+ }
3145
3146
+
3146
3147
+ /// Map a region in the kernel's translation tables.
@@ -3280,7 +3281,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu.rs 14_virtua
3280
3281
- "PX"
3281
3282
- };
3282
3283
+ let virt_region =
3283
- + alloc ::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?;
3284
+ + page_alloc ::kernel_mmio_va_allocator().lock(|allocator| allocator.alloc(num_pages))?;
3284
3285
3285
3286
- write!(
3286
3287
- f,
@@ -3399,7 +3400,7 @@ diff -uNr 13_exceptions_part2_peripheral_IRQs/kernel/src/memory/mmu.rs 14_virtua
3399
3400
+ let phys_region = MemoryRegion::new(phys_start_page_addr, phys_end_exclusive_page_addr);
3400
3401
+
3401
3402
+ let num_pages = NonZeroUsize::new(phys_region.num_pages()).unwrap();
3402
- + let virt_region = alloc ::kernel_mmio_va_allocator()
3403
+ + let virt_region = page_alloc ::kernel_mmio_va_allocator()
3403
3404
+ .lock(|allocator| allocator.alloc(num_pages))
3404
3405
+ .unwrap();
3405
3406
0 commit comments