diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 0000000000000000000000000000000000000000..c1555cbffa1d665a00a627526a5b3fc1f7705d41 --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,15 @@ +## Build Commands +- cp platforms/xxxx/defconfig .config - Copy platforms defconfig to project root +- `make rootfs` - download and prepare root filesystem (copy defconfig before running) +- `make run` - build and run the project (only on qemu platform copy defconfig before running) +- `make build` - build the project for all platforms (copy defconfig before building) +- `make UNITTEST=y run` - run unit tests on qemu platform (copy defconfig before running) +- `make build V=1` - build the project with verbose output (copy defconfig before building) + +## Code Style +- Use rustfmt for formatting code + +## Workflow +- Before we proceed, please confirm any details you are unclear about with me. +- Run config and build or run commands after making changes +- Commit messages follow conventional commits format diff --git a/Cargo.toml b/Cargo.toml index c23dbfd09ee9e88b22b034bb54707559d0057873..6b1f94c03ca7fe9d868e6ac5a7f351e0a278a6d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "util/kbuild_config", "platforms/bootloader", + "platforms/kbootloader", "tee_apps/sh", "tee_apps/tee_app1", "tee_apps/tee_app2", @@ -71,6 +72,7 @@ slab_allocator = { path = "core/slab_allocator" } # Platform Abstraction Layer kplat = { path = "platforms/kplat" } +kbootloader = { path = "platforms/kbootloader" } aarch64-qemu-virt = { path = "platforms/aarch64-qemu-virt" } x86_64-qemu-virt = { path = "platforms/x86_64-qemu-virt" } x86-csv = { path = "platforms/x86-csv" } diff --git a/Kconfig b/Kconfig index d242d798692ed599804a1117ea3b22cf5dbc8c01..c747b553630f9f2e234092030ea9997bcdbec70d 100644 --- a/Kconfig +++ b/Kconfig @@ -220,6 +220,29 @@ config TIMER_FREQUENCY_HZ help Specify the frequency of the timer in hertz. 
+if ARCH_AARCH64 + +config BOOT_DEVICE_MM + rangetype "Boot Device Memory Ranges" + default [(usize, usize)] + help + List of 1 GiB-aligned (start, end) physical address intervals to be + mapped as Device memory in the early boot page table. The boot page + table uses L1 block entries (1 GiB granularity), so both start and end + must be multiples of 0x40000000. Typically covers MMIO and UART ranges + needed before the MMU is enabled. + +config BOOT_NORMAL_MM + rangetype "Boot Normal Memory Ranges" + default [(usize, usize)] + help + List of 1 GiB-aligned (start, end) physical address intervals to be + mapped as Normal (Read/Write/Execute) memory in the early boot page + table. Both start and end must be multiples of 0x40000000. Must + cover the physical address range where the kernel image is loaded. + +endif # ARCH_AARCH64 + endmenu source "drivers/Kconfig" diff --git a/api/kapi/src/vfs/dev/dice.rs b/api/kapi/src/vfs/dev/dice.rs index 91f364f50f9b84e33fcb93c43f7ae30e23a9f96f..f3c506dae2281ffedf0b9cac395b1c1ec289f5fe 100644 --- a/api/kapi/src/vfs/dev/dice.rs +++ b/api/kapi/src/vfs/dev/dice.rs @@ -97,12 +97,12 @@ impl DiceNodeInfo<'static> { } buffer }; - let mut handover_buf = vec![0u8; size as usize]; + let mut handover_buf = vec![0u8; size]; let hash: Vec = get_process_hash()?; let handover = dice_main_flow_chain_codehash(&handover_data, &hash, &mut handover_buf) .map_err(|_| KError::InvalidInput)?; let (cdi_attest, cdi_seal, chain) = - dice_parse_handover(&handover).map_err(|_| KError::InvalidInput)?; + dice_parse_handover(handover).map_err(|_| KError::InvalidInput)?; Ok((cdi_attest.to_vec(), cdi_seal.to_vec(), chain.to_vec())) } @@ -163,5 +163,5 @@ pub extern "C" fn get_rand(output: usize, len: usize) -> u32 { let buf = unsafe { core::slice::from_raw_parts_mut(output as *mut u8, len) }; let mut rand = GLOBAL_RAND.lock(); rand.fill_bytes(buf); - return 0; + 0 } diff --git a/arch/kcpu/Cargo.toml b/arch/kcpu/Cargo.toml index 
33f5b6f3600f1ae89a8137b8525552f56889955e..aa6f2299c147554e9cf7d5e42036b2f95bd5da83 100644 --- a/arch/kcpu/Cargo.toml +++ b/arch/kcpu/Cargo.toml @@ -19,7 +19,7 @@ arm-el2 = ["karch/arm-el2"] backtrace = { workspace = true } karch = { workspace = true } kplat = { workspace = true } -linkme = "0.3" +linkme = { workspace = true } log = "0.4" cfg-if = "1.0" memaddr = { workspace = true } diff --git a/arch/kcpu/src/aarch64/mod.rs b/arch/kcpu/src/aarch64/mod.rs index 1b326c6129ce5f2f94b92dc3992c50ddd1f6804d..b7ed5752a7ecd1efe5eb28734756453a4355a2de 100644 --- a/arch/kcpu/src/aarch64/mod.rs +++ b/arch/kcpu/src/aarch64/mod.rs @@ -6,7 +6,6 @@ mod ctx; -pub mod boot; pub mod instrs; mod excp; @@ -16,6 +15,22 @@ pub mod userspace; pub use self::ctx::{ExceptionContext as TrapFrame, ExceptionContext, FpState, TaskContext}; +/// Initializes trap handling on the current CPU. +/// +/// In detail, it initializes the exception vector, and sets `TTBR0_EL1` to 0 to +/// block low address access. +pub fn init_trap() { + #[cfg(feature = "uspace")] + crate::userspace_common::init_exception_table(); + unsafe extern "C" { + fn exception_vector_base(); + } + unsafe { + karch::write_trap_vector_base(exception_vector_base as *const () as usize); + karch::write_user_page_table(0.into()); + } +} + #[cfg(all(unittest, target_arch = "aarch64"))] pub mod tests_arch { use unittest::def_test; diff --git a/arch/kcpu/src/x86_64/mod.rs b/arch/kcpu/src/x86_64/mod.rs index 3e26f0a3edf6dfe86bb22d2410fb06a895e64197..1e3f66ceea962816357e9feecf1b633fa8b49fa7 100644 --- a/arch/kcpu/src/x86_64/mod.rs +++ b/arch/kcpu/src/x86_64/mod.rs @@ -12,6 +12,7 @@ pub mod instrs; pub use instrs as asm; pub use karch::hypercall; pub mod boot; +pub use boot::init_trap; mod excp; diff --git a/arch/khal/src/mem.rs b/arch/khal/src/mem.rs index 22ad0ab0cfd1bd9290b7178437aec1b1f7e2372f..6e9edf47e69b6a9b6f1fe0a90259722cc202369b 100644 --- a/arch/khal/src/mem.rs +++ b/arch/khal/src/mem.rs @@ -24,23 +24,6 @@ pub fn memory_regions() 
-> impl Iterator { ALL_MEM_REGIONS.iter().cloned() } -/// Fills the `.bss` section with zeros. -/// -/// It requires the symbols `_sbss` and `_ebss` to be defined in the linker script. -/// -/// # Safety -/// -/// This function is unsafe because it writes `.bss` section directly. -pub unsafe fn clear_bss() { - unsafe { - core::slice::from_raw_parts_mut( - _sbss as *mut u8, - (_ebss as *mut u8).offset_from_unsigned(_sbss as *mut u8), - ) - .fill(0); - } -} - /// Initializes physical memory regions. pub fn init() { let mut all_regions = Vec::new(); diff --git a/drivers/kdriver/src/virtio.rs b/drivers/kdriver/src/virtio.rs index 4f278d5991df87bd30f26bf82d84c9f7fbd02784..a022241001b4246b1a93074d57fd3caa4e0c6c1e 100644 --- a/drivers/kdriver/src/virtio.rs +++ b/drivers/kdriver/src/virtio.rs @@ -202,11 +202,7 @@ unsafe impl VirtIoHal for VirtIoHalImpl { let ptr = dma_info.cpu_addr; #[cfg(feature = "crosvm")] { - if let Ok(shared_paddr) = usize::try_from(paddr) { - dma_share(shared_paddr, pages * PAGE_SIZE); - } else { - log::error!("dma_alloc share failed: paddr {paddr:#x} does not fit usize"); - } + dma_share(paddr, pages * PAGE_SIZE); } // bus_addr is the physical address for DMA (paddr, ptr) @@ -230,7 +226,7 @@ unsafe impl VirtIoHal for VirtIoHalImpl { unsafe { kdma::deallocate_dma_memory(dma_info, layout) }; #[cfg(feature = "crosvm")] { - dma_unshare(paddr as usize, pages * 0x1000); + dma_unshare(paddr, pages * 0x1000); } 0 } @@ -319,7 +315,7 @@ unsafe impl VirtIoHal for VirtIoHalImpl { // For crosvm, call unshare_dma_buffer before freeing #[cfg(feature = "crosvm")] { - dma_unshare(paddr as usize, aligned_size); + dma_unshare(paddr, aligned_size); } // Free the bounce buffer via kdma diff --git a/init/kruntime/Cargo.toml b/init/kruntime/Cargo.toml index 0d5f24af6ef9d2c5a593450cabf67887defb15ce..0acac9c02a5cce706959e8c20b6f1c40e4a1d9d7 100644 --- a/init/kruntime/Cargo.toml +++ b/init/kruntime/Cargo.toml @@ -41,6 +41,7 @@ kdma.workspace = true memaddr.workspace = true 
kfs = { workspace = true, optional = true } khal.workspace = true +kcpu.workspace = true kipi = { workspace = true, optional = true } klogger.workspace = true memspace = { workspace = true, optional = true } @@ -55,3 +56,5 @@ indoc = "2" percpu.workspace = true kbuild_config = { workspace = true } karch = { workspace = true } +kbootloader = { workspace = true } +linkme = { workspace = true } diff --git a/init/kruntime/src/lib.rs b/init/kruntime/src/lib.rs index 88bc9658162a63d759dfbe1e008a9369bdef6c71..a9c86b52e55de0dcbec0fdc6ccb981972d634d3f 100644 --- a/init/kruntime/src/lib.rs +++ b/init/kruntime/src/lib.rs @@ -25,6 +25,8 @@ mod lang_items; #[cfg(feature = "smp")] mod mp; +use kbootloader::{PRIMARY_KERNEL_ENTRY, register_boot_init}; + #[cfg(feature = "smp")] pub use self::mp::rust_main_secondary; @@ -118,13 +120,10 @@ impl kdma::DmaPageTableIf for DmaPageTableImpl { /// /// `cpu_id` is the logic ID of the current CPU, and `arg` is passed from the /// bootloader (typically the device tree blob address). -/// -/// In multi-core environment, this function is called on the primary core, and -/// secondary cores call [`rust_main_secondary`]. -#[cfg_attr(not(test), kplat::main)] +#[register_boot_init(PRIMARY_KERNEL_ENTRY)] pub fn rust_main(cpu_id: usize, arg: usize) -> ! { - unsafe { khal::mem::clear_bss() }; khal::percpu::init_primary(cpu_id); + kcpu::init_trap(); khal::early_init(cpu_id, arg); kprintln!("{}", LOGO); diff --git a/init/kruntime/src/mp.rs b/init/kruntime/src/mp.rs index 6e0c1f8d85140291745a761338b521b2c0c8530c..32f8d3f096ba1970f61c8a6865e20740ed1a7172 100644 --- a/init/kruntime/src/mp.rs +++ b/init/kruntime/src/mp.rs @@ -5,6 +5,7 @@ //! SMP bring-up helpers for the runtime. 
use core::sync::atomic::{AtomicUsize, Ordering}; +use kbootloader::{SECOND_KERNEL_ENTRY, register_boot_init}; use kbuild_config::{CPU_NUM, TASK_STACK_SIZE}; use khal::mem::{VirtAddr, v2p}; @@ -38,9 +39,10 @@ pub fn start_secondary_cpus(primary_cpu_id: usize) { /// The main entry point of the runtime for secondary cores. /// /// It is called from the bootstrapping code in the specific platform crate. -#[kplat::secondary_main] +#[register_boot_init(SECOND_KERNEL_ENTRY)] pub fn rust_main_secondary(cpu_id: usize) -> ! { khal::percpu::init_secondary(cpu_id); + kcpu::init_trap(); khal::early_init_secondary(cpu_id); ENTERED_CPUS.fetch_add(1, Ordering::Release); diff --git a/platforms/aarch64-crosvm-virt/Cargo.toml b/platforms/aarch64-crosvm-virt/Cargo.toml index c01d17839e0246af5f8fdc901db9143645e05b05..7249f94e832daa68a56dcf06ff85e402d2f0823d 100644 --- a/platforms/aarch64-crosvm-virt/Cargo.toml +++ b/platforms/aarch64-crosvm-virt/Cargo.toml @@ -9,18 +9,19 @@ homepage.workspace = true repository.workspace = true [features] -fp-simd = ["kcpu/fp-simd"] +fp-simd = ["kcpu/fp-simd", "kbootloader/fp-simd"] smp = ["kplat/smp"] rtc = [] [dependencies] log = "0.4" -page_table = { workspace = true } kspin = "0.1" dw_apb_uart = "0.1" kcpu = { workspace = true } karch = { workspace = true } kplat = { workspace = true } +kbootloader = { workspace = true } +linkme = { workspace = true } aarch64-peripherals = { workspace = true, default-features = false, features = ["gicv3"] } aarch64-cpu = "10.0" rs_fdtree = { workspace = true } diff --git a/platforms/aarch64-crosvm-virt/defconfig b/platforms/aarch64-crosvm-virt/defconfig index d9a9a8f4fcbf4977dd8469e5db477dca910ef444..cdf11b00d78d1fbdc043a610e91effcdb8454e9c 100644 --- a/platforms/aarch64-crosvm-virt/defconfig +++ b/platforms/aarch64-crosvm-virt/defconfig @@ -7,19 +7,21 @@ ARCH_AARCH64=y # ARCH_LOONGARCH64 is not set # ARCH_RISCV64 is not set # ARCH_X86_64 is not set +BOOT_DEVICE_MM=[(0x0, 0x80000000)] +BOOT_NORMAL_MM=[(0x80000000, 
0x100000000)] BOOT_STACK_SIZE=0x40000 # BUILD_TYPE_DEBUG is not set BUILD_TYPE_RELEASE=y CPU_NUM=2 -DMA_MEM_BASE=0x80000000 +DMA_MEM_BASE=0xc0000000 DMA_MEM_SIZE=0x80000 -GICC_PADDR=0x3fff0000 -GICD_PADDR=0x3ffb0000 +GICC_PADDR=0x3ffb0000 +GICD_PADDR=0x3fff0000 IPI_IRQ=1 KERNEL_ASPACE_BASE=0xffff000000000000 KERNEL_ASPACE_SIZE=0x0000fffffffff000 -KERNEL_BASE_PADDR=0x80080000 -KERNEL_BASE_VADDR=0xffff000080080000 +KERNEL_BASE_PADDR=0x80000000 +KERNEL_BASE_VADDR=0xffff000080000000 # LOG_LEVEL_DEBUG is not set # LOG_LEVEL_ERROR is not set # LOG_LEVEL_INFO is not set @@ -33,7 +35,7 @@ PHYS_BUS_OFFSET=0x0 PHYS_MEM_BASE=0x80000000 PHYS_MEM_SIZE=0x80000000 PHYS_VIRT_OFFSET=0xffff000000000000 -PLATFORM="aarch64-qemu-virt" +PLATFORM="aarch64-crosvm-virt" PLATFORM_AARCH64_CROSVM_VIRT=y # PLATFORM_AARCH64_QEMU_VIRT is not set # PLATFORM_AARCH64_RASPI is not set @@ -44,7 +46,6 @@ PLATFORM_AARCH64_CROSVM_VIRT=y PMU_IRQ=23 PSCI_METHOD="hvc" RTC_PADDR=0x2000 -SEV_CBIT_POS=47 TASK_STACK_SIZE=0x40000 TICKS_PER_SECOND=100 TIMER_IRQ=30 diff --git a/platforms/aarch64-crosvm-virt/src/boot.rs b/platforms/aarch64-crosvm-virt/src/boot.rs deleted file mode 100644 index 5a47edf2bcb8a8cd11de066c3c329dc8d735f25c..0000000000000000000000000000000000000000 --- a/platforms/aarch64-crosvm-virt/src/boot.rs +++ /dev/null @@ -1,148 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright 2025 KylinSoft Co., Ltd. -// See LICENSES for license details. - -//! Early boot and entry stubs for the aarch64 crosvm-virt platform. 
-use aarch64_cpu::registers::*; -use kbuild_config::{BOOT_STACK_SIZE, PHYS_VIRT_OFFSET}; -use kplat::memory::{PageAligned, pa}; -use page_table::{ - PageTableEntry as GenericPTE, PagingFlags as MappingFlags, aarch64::A64PageEntry as A64PTE, -}; - -#[unsafe(link_section = ".bss.stack")] -static mut BOOT_STACK: [u8; BOOT_STACK_SIZE] = [0; BOOT_STACK_SIZE]; -#[unsafe(link_section = ".data")] -static mut BOOT_PT_L0: PageAligned<[A64PTE; 512]> = PageAligned::new([A64PTE::empty(); 512]); -#[unsafe(link_section = ".data")] -static mut BOOT_PT_L1: PageAligned<[A64PTE; 512]> = PageAligned::new([A64PTE::empty(); 512]); -use crate::serial::{boot_print_str, boot_print_usize}; -/// Build the minimal page tables used before the full MMU setup. -unsafe fn init_boot_page_table() { - boot_print_str("[boot] init boot page table\r\n"); - crate::psci::kvm_guard_granule_init(); - boot_print_str("[boot] kvm xmap pci cam\r\n"); - crate::psci::do_xmap_granules(0x7200_0000, 0x100_0000); - boot_print_str("[boot] kvm xmap pci mem\r\n"); - crate::psci::do_xmap_granules(0x7000_0000, 0x200_0000); - boot_print_str("[boot] kvm xmap gicv3 mem\r\n"); - crate::psci::do_xmap_granules(0x3ffb_0000, 0x20_0000); - boot_print_str("[boot] kvm xmap rtc\r\n"); - crate::psci::do_xmap_granules(0x2000, 0x1000); - unsafe { - BOOT_PT_L0[0] = A64PTE::new_table(pa!(&raw mut BOOT_PT_L1 as usize)); - BOOT_PT_L1[0] = A64PTE::new_page( - pa!(0), - MappingFlags::READ | MappingFlags::WRITE | MappingFlags::DEVICE, - true, - ); - BOOT_PT_L1[1] = A64PTE::new_page( - pa!(0x4000_0000), - MappingFlags::READ | MappingFlags::WRITE | MappingFlags::DEVICE, - true, - ); - BOOT_PT_L1[2] = A64PTE::new_page( - pa!(0x8000_0000), - MappingFlags::READ | MappingFlags::WRITE | MappingFlags::EXECUTE, - true, - ); - BOOT_PT_L1[3] = A64PTE::new_page( - pa!(0xC000_0000), - MappingFlags::READ | MappingFlags::WRITE | MappingFlags::EXECUTE, - true, - ); - } -} -#[unsafe(no_mangle)] -/// Boot-time smoke test entry for early debugging. 
-extern "C" fn kernel_main_test() { - boot_print_str("[boot] kernel main entered cpu id\r\n"); -} -/// Enable FP/SIMD usage if supported by build features. -unsafe fn enable_fp() { - #[cfg(feature = "fp-simd")] - karch::enable_fp(); -} -#[unsafe(naked)] -#[unsafe(no_mangle)] -#[unsafe(link_section = ".text.boot")] -/// First instruction executed by the primary CPU. -unsafe extern "C" fn _start() -> ! { - core::arch::naked_asm!(" - bl {entry} // Branch to kernel start, magic - .space 52, 0 - .inst 0x644d5241 - .space 4, 0 - ", - entry = sym _start_primary, - ) -} -/// Switch the current exception level to EL1. -unsafe fn switch_to_el1() { - let current_sp = aarch64_cpu::registers::SP.get(); - SPSel.write(SPSel::SP::ELx); - aarch64_cpu::registers::SP.set(current_sp); - let current_el = CurrentEL.read(CurrentEL::EL); - boot_print_str("[boot] Current el "); - boot_print_usize(current_el as _); -} -#[unsafe(naked)] -/// Primary CPU boot path: set up stack, MMU, and jump to `kplat::entry`. -unsafe extern "C" fn _start_primary() -> ! { - core::arch::naked_asm!(" - mrs x19, mpidr_el1 - and x19, x19, #0xffffff // get current CPU id - mov x20, x0 // save DTB pointer - adrp x8, {boot_stack} // setup boot stack - add x8, x8, {boot_stack_size} - mov sp, x8 - bl {switch_to_el1} // switch to EL1 - bl {enable_fp} // enable fp/neon - bl {init_boot_page_table} - adrp x0, {boot_pt} - bl {init_mmu} // setup MMU - mov x8, {phys_virt_offset} // set SP to the high address - add sp, sp, x8 - mov x0, x19 // call_main(cpu_id, dtb) - mov x1, x20 - ldr x8, ={entry} - blr x8 - b . 
- ", - switch_to_el1 = sym switch_to_el1, - init_boot_page_table = sym init_boot_page_table, - init_mmu = sym kcpu::boot::init_mmu, - enable_fp = sym enable_fp, - boot_pt = sym BOOT_PT_L0, - phys_virt_offset = const PHYS_VIRT_OFFSET, - entry = sym kplat::entry, - boot_stack = sym BOOT_STACK, - boot_stack_size = const BOOT_STACK_SIZE, - ) -} -#[cfg(feature = "smp")] -#[unsafe(naked)] -/// Secondary CPU boot path for SMP bring-up. -pub(crate) unsafe extern "C" fn _start_secondary() -> ! { - core::arch::naked_asm!(" - mrs x19, mpidr_el1 - and x19, x19, #0xffffff // get current CPU id - mov sp, x0 - bl {switch_to_el1} - bl {enable_fp} - adrp x0, {boot_pt} - bl {init_mmu} - mov x8, {phys_virt_offset} // set SP to the high address - add sp, sp, x8 - mov x0, x19 // call_secondary_main(cpu_id) - ldr x8, ={entry} - blr x8 - b .", - switch_to_el1 = sym kcpu::boot::switch_to_el1, - init_mmu = sym kcpu::boot::init_mmu, - enable_fp = sym enable_fp, - boot_pt = sym BOOT_PT_L0, - phys_virt_offset = const PHYS_VIRT_OFFSET, - entry = sym kplat::entry_secondary, - ) -} diff --git a/platforms/aarch64-crosvm-virt/src/fdt.rs b/platforms/aarch64-crosvm-virt/src/fdt.rs index e8481fe901f6e50e3e9f60e93e0cebd4edd12bb5..7a540752bef4a3bef261d63d52f38f133bfb4400 100644 --- a/platforms/aarch64-crosvm-virt/src/fdt.rs +++ b/platforms/aarch64-crosvm-virt/src/fdt.rs @@ -9,10 +9,10 @@ use log::*; use rs_fdtree::{InterruptController, LinuxFdt}; pub static FDT: Once = Once::new(); /// Parse and cache the FDT pointed to by the bootloader. 
-pub(crate) fn init_fdt(fdt_paddr: VirtAddr) { - info!("FDT addr is: {:x}", fdt_paddr.as_usize()); +pub(crate) fn init_fdt(fdt_vaddr: VirtAddr) { + info!("FDT addr is: {:x}", fdt_vaddr.as_usize()); let fdt = unsafe { - LinuxFdt::from_ptr(fdt_paddr.as_usize() as *const u8).expect("Failed to parse FDT") + LinuxFdt::from_ptr(fdt_vaddr.as_usize() as *const u8).expect("Failed to parse FDT") }; FDT.call_once(|| fdt); dice_reg(); @@ -34,16 +34,13 @@ pub fn dice_reg() -> Option<(VirtAddr, usize)> { let dice = FDT.get().unwrap().dice(); if let Some(dice_node) = dice { info!("Found DICE node in FDT"); - for reg in dice_node.regions().expect("DICE regions") { + if let Some(reg) = dice_node.regions().expect("DICE regions").next() { info!( "DICE region: addr=0x{:x}, size=0x{:x}", reg.starting_address as usize, reg.size ); + let va = p2v(pa!(reg.starting_address as usize)); - unsafe { - let test_ptr = va.as_mut_ptr(); - let _ = test_ptr.read_volatile(); - } return Some((va, reg.size)); } } diff --git a/platforms/aarch64-crosvm-virt/src/gicv3.rs b/platforms/aarch64-crosvm-virt/src/gicv3.rs index fdc739042bff4caf4872576396a1a67a6042db8b..3e9ce1152f8b230f7ac4e29c512b99bad7edcbd9 100644 --- a/platforms/aarch64-crosvm-virt/src/gicv3.rs +++ b/platforms/aarch64-crosvm-virt/src/gicv3.rs @@ -103,8 +103,7 @@ fn end_of_interrupt(irq: usize) { GicV3::end_interrupt(IntId::from(irq as u32)); } fn get_and_acknowledge_interrupt() -> usize { - let irq = u32::from(GicV3::get_and_acknowledge_interrupt().unwrap()) as usize; - return irq; + u32::from(GicV3::get_and_acknowledge_interrupt().unwrap()) as usize } /// Send a software-generated interrupt to target CPUs. 
pub fn notify_cpu(irq: usize, target: kplat::interrupts::TargetCpu) { @@ -121,11 +120,11 @@ pub fn notify_cpu(irq: usize, target: kplat::interrupts::TargetCpu) { } #[allow(dead_code)] fn test_manual_trigger() { - let gicd_base = 0xffff00003fff0000 as usize; + let gicd_base = 0xffff00003fff0000_usize; info!("=== Manual Trigger Test ==="); unsafe { - core::ptr::write_volatile((gicd_base + 0x200 + 1 * 4) as *mut u32, 0x1); - let ispendr = core::ptr::read_volatile((gicd_base + 0x200 + 1 * 4) as *const u32); + core::ptr::write_volatile((gicd_base + 0x200 + 4) as *mut u32, 0x1); + let ispendr = core::ptr::read_volatile((gicd_base + 0x200 + 4) as *const u32); info!("Manual trigger: ISPENDR = {:#x}", ispendr); } for _ in 0..1000 { @@ -136,7 +135,7 @@ fn test_manual_trigger() { #[allow(dead_code)] fn debug_irq_32() { let irq = 32; - let gicd_base = 0xffff00003fff0000 as usize; + let gicd_base = 0xffff00003fff0000_usize; unsafe { let isenabler = core::ptr::read_volatile((gicd_base + 0x100 + (irq / 32) * 4) as *const u32); diff --git a/platforms/aarch64-crosvm-virt/src/init.rs b/platforms/aarch64-crosvm-virt/src/init.rs index cfd3625ed99322a96627633c72b63840747c7b8b..303f8d10d8e3a502d15e1283ae8f99be8a237c85 100644 --- a/platforms/aarch64-crosvm-virt/src/init.rs +++ b/platforms/aarch64-crosvm-virt/src/init.rs @@ -13,17 +13,21 @@ use kplat::{ }; use log::*; -use crate::serial::*; +fn map_kvm_guarded_mmio() { + crate::psci::kvm_guard_granule_init(); + crate::psci::do_xmap_granules(0x7200_0000, 0x100_0000); + crate::psci::do_xmap_granules(0x7000_0000, 0x200_0000); + crate::psci::do_xmap_granules(0x3ffb_0000, 0x20_0000); + crate::psci::do_xmap_granules(0x2000, 0x1000); +} -/// Platform-specific `BootHandler` implementation. struct BootHandlerImpl; #[impl_dev_interface] impl BootHandler for BootHandlerImpl { /// Perform early, minimal init before the allocator is ready. 
fn early_init(_cpu_id: usize, dtb: usize) { - boot_print_str("[boot] platform init early\r\n"); + map_kvm_guarded_mmio(); crate::mem::early_init(dtb); - kcpu::boot::init_trap(); aarch64_peripherals::ns16550a::early_init(p2v(pa!(UART_PADDR))); aarch64_peripherals::psci::init(PSCI_METHOD); aarch64_peripherals::generic_timer::early_init(); @@ -32,9 +36,7 @@ impl BootHandler for BootHandlerImpl { } #[cfg(feature = "smp")] - fn early_init_ap(_cpu_id: usize) { - kcpu::boot::init_trap(); - } + fn early_init_ap(_cpu_id: usize) {} /// Finish platform init after core subsystems are online. fn final_init(cpu_id: usize, dtb: usize) { diff --git a/platforms/aarch64-crosvm-virt/src/lib.rs b/platforms/aarch64-crosvm-virt/src/lib.rs index 2ed0e748b150227190ad676540c5254616bcac1b..55ec30c02fb1e62bbb8131435e27ab68e07d5953 100644 --- a/platforms/aarch64-crosvm-virt/src/lib.rs +++ b/platforms/aarch64-crosvm-virt/src/lib.rs @@ -6,14 +6,13 @@ #![no_std] #[macro_use] extern crate kplat; -mod boot; +extern crate kbootloader; pub mod fdt; mod gicv3; mod init; mod mem; mod power; pub mod psci; -mod serial; aarch64_peripherals::ns16550_console_if_impl!(ConsoleImpl); aarch64_peripherals::time_if_impl!(GlobalTimerImpl); irq_if_impl!(IntrManagerImpl); diff --git a/platforms/aarch64-crosvm-virt/src/mem.rs b/platforms/aarch64-crosvm-virt/src/mem.rs index 04269f3107c33d24c289c2273d3460f22e46b664..23be631df2118d071102ac25ab67a39ce8520269 100644 --- a/platforms/aarch64-crosvm-virt/src/mem.rs +++ b/platforms/aarch64-crosvm-virt/src/mem.rs @@ -6,7 +6,7 @@ use core::sync::atomic::{AtomicUsize, Ordering}; use kbuild_config::{MMIO_RANGES, PHYS_MEM_BASE, PHYS_MEM_SIZE, PHYS_VIRT_OFFSET}; -use kplat::memory::{HwMemory, MemRange, PhysAddr, VirtAddr, pa, va}; +use kplat::memory::{HwMemory, MemRange, PhysAddr, VirtAddr, p2v, pa, va}; use ktypes::Once; use rs_fdtree::LinuxFdt; @@ -17,16 +17,16 @@ static DICE_MEM_BASE: AtomicUsize = AtomicUsize::new(0); static DICE_MEM_SIZE: AtomicUsize = AtomicUsize::new(0); 
/// Capture FDT/DICE memory ranges before the allocator is initialized. pub(crate) fn early_init(fdt_paddr: usize) { + let fdt_va = p2v(pa!(fdt_paddr)); FDT_MEM_BASE.store(fdt_paddr, Ordering::SeqCst); - let fdt = unsafe { LinuxFdt::from_ptr(fdt_paddr as *const u8).expect("Failed to parse FDT") }; - fdt.dice().map(|dice_node| { - let dice = dice_node; - for reg in dice.regions().expect("DICE regions") { - DICE_MEM_BASE.store(reg.starting_address as usize, Ordering::SeqCst); - DICE_MEM_SIZE.store(reg.size as usize, Ordering::SeqCst); - break; - } - }); + let fdt = + unsafe { LinuxFdt::from_ptr(fdt_va.as_usize() as *const u8).expect("Failed to parse FDT") }; + if let Some(dice) = fdt.dice() + && let Some(reg) = dice.regions().expect("DICE regions").next() + { + DICE_MEM_BASE.store(reg.starting_address as usize, Ordering::SeqCst); + DICE_MEM_SIZE.store(reg.size, Ordering::SeqCst); + } } /// Platform-specific memory description for the kernel. struct HwMemoryImpl; diff --git a/platforms/aarch64-crosvm-virt/src/power.rs b/platforms/aarch64-crosvm-virt/src/power.rs index b4653fa931a252a6b0fefe0f19ce905005966727..ae60ea77e90ebb4a1913011b6fa275a645c51b70 100644 --- a/platforms/aarch64-crosvm-virt/src/power.rs +++ b/platforms/aarch64-crosvm-virt/src/power.rs @@ -11,7 +11,9 @@ impl SysCtrl for PowerImpl { #[cfg(feature = "smp")] fn boot_ap(cpu_id: usize, stack_top_paddr: usize) { use kplat::memory::{v2p, va}; - let entry_paddr = v2p(va!(crate::boot::_start_secondary as *const () as usize)); + let entry_paddr = v2p(va!( + kbootloader::arch::_start_secondary as *const () as usize + )); aarch64_peripherals::psci::cpu_on(cpu_id, entry_paddr.as_usize(), stack_top_paddr); } diff --git a/platforms/aarch64-crosvm-virt/src/psci.rs b/platforms/aarch64-crosvm-virt/src/psci.rs index 1208fdaea72e702b3e47a90a017242f24b9e9815..3c7e57b4c08f84230166969371f0575f7cd9b351 100644 --- a/platforms/aarch64-crosvm-virt/src/psci.rs +++ b/platforms/aarch64-crosvm-virt/src/psci.rs @@ -5,8 +5,6 @@ //! 
PSCI wrappers and KVM guard-granule helpers. use kplat::psci::PsciOp; use ktypes::Once; - -use crate::serial::{boot_print_str, boot_print_usize}; pub static GUARD_GRANULE: Once = Once::new(); const ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID: u32 = ((1) << 31) | ((1) << 30) | (((6) & 0x3F) << 24) | ((4) & 0xFFFF); @@ -35,10 +33,8 @@ pub fn kvm_guard_granule_init() { psci_hvc_call(ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_INFO_FUNC_ID, 0, 0, 0); assert_eq!(guard_has_range, 0x1); GUARD_GRANULE.call_once(|| guard_granule); - boot_print_str("KVM MMIO guard granule: "); - boot_print_usize(guard_granule); } -fn __invoke_mmioguard(phys_addr: usize, nr_granules: usize, map: bool) -> usize { +fn __invoke_mmioguard(phys_addr: usize, _nr_granules: usize, map: bool) -> usize { let func_id: u32 = if map { ((1) << 31) | ((1) << 30) | (((6) & 0x3F) << 24) | ((10) & 0xFFFF) } else { @@ -46,20 +42,9 @@ fn __invoke_mmioguard(phys_addr: usize, nr_granules: usize, map: bool) -> usize }; let (result, done) = psci_hvc_call(func_id, phys_addr, 1, 0); if result != 0 { - boot_print_str("[error] psci_hvc_call failed\r\n"); - boot_print_str(" func = "); - boot_print_usize(func_id as _); - boot_print_str(" arg0 = "); - boot_print_usize(phys_addr); - boot_print_str(" arg1 = "); - boot_print_usize(nr_granules); - boot_print_str(" ret0 = "); - boot_print_usize(result); - boot_print_str(" ret1 = "); - boot_print_usize(done); panic!(); } - return done; + done } fn __do_xmap_granules(phys_addr: usize, nr_granules: usize, map: bool) -> usize { let mut nr_xmapped = 0; @@ -69,13 +54,12 @@ fn __do_xmap_granules(phys_addr: usize, nr_granules: usize, map: bool) -> usize let __nr_xmapped = __invoke_mmioguard(phys_addr, nr_granules as usize, map); nr_xmapped += __nr_xmapped; if __nr_xmapped as isize > nr_granules { - boot_print_str("[warning] __invoke_mmioguard"); break; } phys_addr += __nr_xmapped * { GUARD_GRANULE.get().unwrap() }; nr_granules -= __nr_xmapped as isize; } - return nr_xmapped; + nr_xmapped } 
/// Map a physical MMIO range via the KVM guard-granule interface. pub fn do_xmap_granules(phys_addr: usize, size: usize) { diff --git a/platforms/aarch64-qemu-virt/Cargo.toml b/platforms/aarch64-qemu-virt/Cargo.toml index e5b13ce0ada50a2945d40da2a202dd9b2fb3c3de..c65774a4fb8abaf5ce5520a110edef56eccc0bce 100644 --- a/platforms/aarch64-qemu-virt/Cargo.toml +++ b/platforms/aarch64-qemu-virt/Cargo.toml @@ -9,7 +9,7 @@ homepage.workspace = true repository.workspace = true [features] -fp-simd = ["kcpu/fp-simd"] +fp-simd = ["kcpu/fp-simd", "kbootloader/fp-simd"] rtc = [] smp = ["kplat/smp"] nmi = ["aarch64-peripherals/nmi-pmu", "kplat/nmi", "pmu"] @@ -23,6 +23,7 @@ kcpu = { workspace = true } karch = { workspace = true } kplat = { workspace = true } kbuild_config = { workspace = true } +kbootloader = { workspace = true } [package.metadata.docs.rs] targets = ["aarch64-unknown-none"] diff --git a/platforms/aarch64-qemu-virt/defconfig b/platforms/aarch64-qemu-virt/defconfig index 8a6f16f7f9e825a73f7dac96946319bafe4b481b..dec9695e89b1f432113c4679a3ddca351e3497d3 100644 --- a/platforms/aarch64-qemu-virt/defconfig +++ b/platforms/aarch64-qemu-virt/defconfig @@ -7,6 +7,8 @@ ARCH_AARCH64=y # ARCH_LOONGARCH64 is not set # ARCH_RISCV64 is not set # ARCH_X86_64 is not set +BOOT_DEVICE_MM=[(0x0, 0x40000000)] +BOOT_NORMAL_MM=[(0x40000000, 0x80000000)] BOOT_STACK_SIZE=0x40000 # BUILD_TYPE_DEBUG is not set BUILD_TYPE_RELEASE=y @@ -44,7 +46,6 @@ PLATFORM_AARCH64_QEMU_VIRT=y PMU_IRQ=23 PSCI_METHOD="hvc" RTC_PADDR=0x09010000 -SEV_CBIT_POS=47 TASK_STACK_SIZE=0x40000 TICKS_PER_SECOND=100 TIMER_IRQ=30 diff --git a/platforms/aarch64-qemu-virt/src/boot.rs b/platforms/aarch64-qemu-virt/src/boot.rs deleted file mode 100644 index d3dacd3bd51e1b1115936e7983ebb0ea41429e75..0000000000000000000000000000000000000000 --- a/platforms/aarch64-qemu-virt/src/boot.rs +++ /dev/null @@ -1,115 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright 2025 KylinSoft Co., Ltd. 
-// See LICENSES for license details. - -//! Boot entry and early page table setup for aarch64-qemu-virt. - -use kbuild_config::{BOOT_STACK_SIZE, PHYS_VIRT_OFFSET}; -use kplat::memory::{PageAligned, pa}; -use page_table::{ - PageTableEntry as GenericPTE, PagingFlags as MappingFlags, aarch64::A64PageEntry as A64PTE, -}; -#[unsafe(link_section = ".bss.stack")] -static mut BOOT_STACK: [u8; BOOT_STACK_SIZE] = [0; BOOT_STACK_SIZE]; -#[unsafe(link_section = ".data")] -static mut BOOT_PT_L0: PageAligned<[A64PTE; 512]> = PageAligned::new([A64PTE::empty(); 512]); -#[unsafe(link_section = ".data")] -static mut BOOT_PT_L1: PageAligned<[A64PTE; 512]> = PageAligned::new([A64PTE::empty(); 512]); -unsafe fn init_boot_page_table() { - unsafe { - BOOT_PT_L0[0] = A64PTE::new_table(pa!(&raw mut BOOT_PT_L1 as usize)); - BOOT_PT_L1[0] = A64PTE::new_page( - pa!(0), - MappingFlags::READ | MappingFlags::WRITE | MappingFlags::DEVICE, - true, - ); - BOOT_PT_L1[1] = A64PTE::new_page( - pa!(0x4000_0000), - MappingFlags::READ | MappingFlags::WRITE | MappingFlags::EXECUTE, - true, - ); - } -} -unsafe fn enable_fp() { - #[cfg(feature = "fp-simd")] - karch::enable_fp(); -} -#[unsafe(naked)] -#[unsafe(no_mangle)] -#[unsafe(link_section = ".text.boot")] -unsafe extern "C" fn _start() -> ! { - const FLAG_LE: usize = 0b0; - const FLAG_PAGE_SIZE_4K: usize = 0b10; - const FLAG_ANY_MEM: usize = 0b1000; - core::arch::naked_asm!(" - add x13, x18, #0x16 // 'MZ' magic - b {entry} // Branch to kernel start, magic - .quad 0 // Image load offset from start of RAM, little-endian - .quad _ekernel - _start // Effective size of kernel image, little-endian - .quad {flags} // Kernel flags, little-endian - .quad 0 // reserved - .quad 0 // reserved - .quad 0 // reserved - .ascii \"ARM\\x64\" // Magic number - .long 0 // reserved (used for PE COFF offset)", - flags = const FLAG_LE | FLAG_PAGE_SIZE_4K | FLAG_ANY_MEM, - entry = sym _start_primary, - ) -} -#[unsafe(naked)] -unsafe extern "C" fn _start_primary() -> ! 
{ - core::arch::naked_asm!(" - mrs x19, mpidr_el1 - and x19, x19, #0xffffff // get current CPU id - mov x20, x0 // save DTB pointer - adrp x8, {boot_stack} // setup boot stack - add x8, x8, {boot_stack_size} - mov sp, x8 - bl {switch_to_el1} // switch to EL1 - bl {enable_fp} // enable fp/neon - bl {init_boot_page_table} - adrp x0, {boot_pt} - bl {init_mmu} // setup MMU - mov x8, {phys_virt_offset} // set SP to the high address - add sp, sp, x8 - mov x0, x19 // call_main(cpu_id, dtb) - mov x1, x20 - ldr x8, ={entry} - blr x8 - b .", - switch_to_el1 = sym kcpu::boot::switch_to_el1, - init_mmu = sym kcpu::boot::init_mmu, - init_boot_page_table = sym init_boot_page_table, - enable_fp = sym enable_fp, - boot_pt = sym BOOT_PT_L0, - boot_stack = sym BOOT_STACK, - boot_stack_size = const BOOT_STACK_SIZE, - phys_virt_offset = const PHYS_VIRT_OFFSET, - entry = sym kplat::entry, - ) -} -#[cfg(feature = "smp")] -#[unsafe(naked)] -pub(crate) unsafe extern "C" fn _start_secondary() -> ! { - core::arch::naked_asm!(" - mrs x19, mpidr_el1 - and x19, x19, #0xffffff // get current CPU id - mov sp, x0 - bl {switch_to_el1} - bl {enable_fp} - adrp x0, {boot_pt} - bl {init_mmu} - mov x8, {phys_virt_offset} // set SP to the high address - add sp, sp, x8 - mov x0, x19 // call_secondary_main(cpu_id) - ldr x8, ={entry} - blr x8 - b .", - switch_to_el1 = sym kcpu::boot::switch_to_el1, - init_mmu = sym kcpu::boot::init_mmu, - enable_fp = sym enable_fp, - boot_pt = sym BOOT_PT_L0, - phys_virt_offset = const PHYS_VIRT_OFFSET, - entry = sym kplat::entry_secondary, - ) -} diff --git a/platforms/aarch64-qemu-virt/src/init.rs b/platforms/aarch64-qemu-virt/src/init.rs index cb6a68a44b3cfb2f069888560b5dd9e9bc0d623f..c922e439b14f656558bf34700cc6d1c365059e69 100644 --- a/platforms/aarch64-qemu-virt/src/init.rs +++ b/platforms/aarch64-qemu-virt/src/init.rs @@ -16,7 +16,6 @@ struct BootHandlerImpl; #[impl_dev_interface] impl BootHandler for BootHandlerImpl { fn early_init(_cpu_id: usize, _dtb: usize) { - 
kcpu::boot::init_trap(); aarch64_peripherals::pl011::early_init(p2v(pa!(UART_PADDR))); aarch64_peripherals::psci::init(PSCI_METHOD); aarch64_peripherals::generic_timer::early_init(); @@ -25,9 +24,7 @@ impl BootHandler for BootHandlerImpl { } #[cfg(feature = "smp")] - fn early_init_ap(_cpu_id: usize) { - kcpu::boot::init_trap(); - } + fn early_init_ap(_cpu_id: usize) {} fn final_init(_cpu_id: usize, _dtb: usize) { aarch64_peripherals::gic::init_gic(p2v(pa!(GICD_PADDR)), p2v(pa!(GICC_PADDR))); diff --git a/platforms/aarch64-qemu-virt/src/lib.rs b/platforms/aarch64-qemu-virt/src/lib.rs index 223949ff370b4adcbac6f2e673a0cadf58d1bc18..54acd56ba6a5209d033c3c7cb7399e18c7b82114 100644 --- a/platforms/aarch64-qemu-virt/src/lib.rs +++ b/platforms/aarch64-qemu-virt/src/lib.rs @@ -7,8 +7,9 @@ #![no_std] #[macro_use] extern crate kplat; +// Force-link kbootloader so that _start and boot code are included in the final binary. +extern crate kbootloader; -mod boot; mod init; mod mem; mod power; diff --git a/platforms/aarch64-qemu-virt/src/power.rs b/platforms/aarch64-qemu-virt/src/power.rs index 49fa30452125e8a537250b7db860267e3284afda..c048301cc29f47c9cfac41fc9a13017abaaa1cdd 100644 --- a/platforms/aarch64-qemu-virt/src/power.rs +++ b/platforms/aarch64-qemu-virt/src/power.rs @@ -5,13 +5,16 @@ //! Power control implementation for aarch64-qemu-virt. 
use kplat::sys::SysCtrl; + struct PowerImpl; #[impl_dev_interface] impl SysCtrl for PowerImpl { #[cfg(feature = "smp")] fn boot_ap(cpu_id: usize, stack_top_paddr: usize) { use kplat::memory::{v2p, va}; - let entry_paddr = v2p(va!(crate::boot::_start_secondary as *const () as usize)); + let entry_paddr = v2p(va!( + kbootloader::arch::_start_secondary as *const () as usize + )); aarch64_peripherals::psci::cpu_on(cpu_id, entry_paddr.as_usize(), stack_top_paddr); } diff --git a/platforms/kbootloader/Cargo.toml b/platforms/kbootloader/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..4301bd43c67e917439b6af79a96caeafc4eb37fb --- /dev/null +++ b/platforms/kbootloader/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "kbootloader" +version.workspace = true +edition.workspace = true +license.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +keywords = ["bootloader", "x-kernel", "multi-arch", "position-independent"] +description = "Position-independent bootloader for x-kernel (multi-arch)" + +[features] +default = [] +fp-simd = [] + +[dependencies] +kbuild_config = { workspace = true } +linkme = { workspace = true } + +# Architecture-specific dependencies +[target.'cfg(target_arch = "aarch64")'.dependencies] +aarch64-cpu = "10.0" +karch = { workspace = true } +page_table = { workspace = true } +memaddr = { workspace = true } + +[target.'cfg(target_arch = "x86_64")'.dependencies] +x86 = "0.52" +x86_64 = { workspace = true } + +[build-dependencies] +kbuild_config.workspace = true diff --git a/arch/khal/build.rs b/platforms/kbootloader/build.rs similarity index 85% rename from arch/khal/build.rs rename to platforms/kbootloader/build.rs index 123c16f9a4012d5a5fbf1a73d05d4f3b89806aec..70c9d4968b74bb45627938fff334aa06c3069402 100644 --- a/arch/khal/build.rs +++ b/platforms/kbootloader/build.rs @@ -1,18 +1,18 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright 2025 KylinSoft Co., Ltd. 
-// See LICENSES for license details. - -//! Build script for generating the platform linker script. - use std::{io::Result, path::Path}; -/// Entry point for build script. fn main() { + println!("cargo:rerun-if-changed=linker.lds.S"); + println!("cargo:rerun-if-changed=build.rs"); let arch = std::env::var("CARGO_CFG_TARGET_ARCH").unwrap(); let platform = kbuild_config::PLATFORM; if platform != "unknown" { gen_linker_script(&arch, platform).unwrap(); } + + // println!("cargo:rustc-link-arg=-T{}", linker_script_path.display()); + // println!("cargo:rustc-link-arg=-pie"); + // println!("cargo:rustc-link-arg=-Bsymbolic"); + // println!("cargo:rustc-link-arg=--no-dynamic-linker"); } /// Generates a linker script for the given target arch and platform. @@ -31,6 +31,7 @@ fn gen_linker_script(arch: &str, platform: &str) -> Result<()> { "%KERNEL_BASE%", &format!("{:#x}", kbuild_config::KERNEL_BASE_VADDR), ); + let ld_content = ld_content.replace("%CPU_NUM%", &format!("{}", kbuild_config::CPU_NUM)); let ld_content = ld_content.replace( "%DWARF%", diff --git a/platforms/kbootloader/linker.lds.S b/platforms/kbootloader/linker.lds.S new file mode 100644 index 0000000000000000000000000000000000000000..50eab20963c7ddddb7b3d50af7e49261569a49f1 --- /dev/null +++ b/platforms/kbootloader/linker.lds.S @@ -0,0 +1,195 @@ +OUTPUT_ARCH(%ARCH%) + +BASE_ADDRESS = %KERNEL_BASE%; + +SECTIONS +{ + . = BASE_ADDRESS; + _skernel = .; + .head.text : { + _text = .; + KEEP(*(.head.text)) + } + + .idmap.text : ALIGN(64){ + _idmap_text = .; + KEEP(*(.idmap.text)) + . = ALIGN(4K); + _idmap_text_end = .; + } + + .text : ALIGN(4K) { + _stext = .; + *(.text.boot) + *(.text .text.*) + *(.ltext .ltext.*) + . = ALIGN(4K); + _etext = .; + } + + _srodata = .; + .rodata : ALIGN(4K) { + *(.rodata .rodata.*) + *(.srodata .srodata.*) + *(.sdata2 .sdata2.*) + } + + .init_array : ALIGN(0x10) { + __init_array_start = .; + *(.init_array .init_array.*) + __init_array_end = .; + } + + %DWARF% + + . 
= ALIGN(4K); + _erodata = .; + + .data : ALIGN(4K) { + _sdata = .; + *(.data.boot_page_table) + . = ALIGN(4K); + *(.data .data.*) + *(.ldata .ldata.*) + *(.sdata .sdata.*) + *(.got .got.*) + + . = ALIGN(0x10); + __sdriver_register = .; + KEEP(*(.driver.register*)) + __edriver_register = .; + + . = ALIGN(0x10); + _ex_table_start = .; + KEEP(*(__ex_table)) + _ex_table_end = .; + } + + .tdata : ALIGN(0x10) { + _stdata = .; + *(.tdata .tdata.*) + _etdata = .; + } + + .tbss : ALIGN(0x10) { + _stbss = .; + *(.tbss .tbss.*) + *(.tcommon) + _etbss = .; + } + + . = ALIGN(4K); + _percpu_start = .; + _percpu_end = _percpu_start + SIZEOF(.percpu); + .percpu 0x0 : AT(_percpu_start) { + _percpu_load_start = .; + *(.percpu .percpu.*) + _percpu_load_end = .; + . = _percpu_load_start + ALIGN(64) * %CPU_NUM%; + } + . = _percpu_end; + + . = ALIGN(4K); + _edata = .; + + .bss : AT(.) ALIGN(4K) { + boot_stack = .; + *(.bss.stack) + . = ALIGN(4K); + boot_stack_top = .; + + _sbss = .; + *(.bss .bss.*) + *(.lbss .lbss.*) + *(.sbss .sbss.*) + *(COMMON) + . 
= ALIGN(4K); + _ebss = .; + } + + _ekernel = .; + + /DISCARD/ : { + *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) + } +} + +SECTIONS { + linkme_IRQ : { KEEP(*(linkme_IRQ)) } + linkm2_IRQ : { KEEP(*(linkm2_IRQ)) } + linkme_PAGE_FAULT : { KEEP(*(linkme_PAGE_FAULT)) } + linkm2_PAGE_FAULT : { KEEP(*(linkm2_PAGE_FAULT)) } + linkme_INIT_TRAP : { KEEP(*(linkme_INIT_TRAP)) } + linkm2_INIT_TRAP : { KEEP(*(linkm2_INIT_TRAP)) } + linkme_PRIMARY_KERNEL_ENTRY : { KEEP(*(linkme_PRIMARY_KERNEL_ENTRY)) } + linkm2_PRIMARY_KERNEL_ENTRY : { KEEP(*(linkm2_PRIMARY_KERNEL_ENTRY)) } + linkme_SECOND_KERNEL_ENTRY : { KEEP(*(linkme_SECOND_KERNEL_ENTRY)) } + linkm2_SECOND_KERNEL_ENTRY : { KEEP(*(linkm2_SECOND_KERNEL_ENTRY)) } + scope_local : { KEEP(*(scope_local)) } + + /* Unittest section */ + .unittest : ALIGN(8) { + __unittest_start = .; + KEEP(*(.unittest .unittest.*)) + __unittest_end = .; + } + + /* LLVM profiling sections */ + __llvm_prf_data : ALIGN(8) { + PROVIDE(__start___llvm_prf_data = .); + __llvm_prf_data_start = .; + KEEP(*(__llvm_prf_data)) + PROVIDE(__stop___llvm_prf_data = .); + __llvm_prf_data_end = .; + } + + __llvm_prf_vnds : ALIGN(8) { + PROVIDE(__start___llvm_prf_vnds = .); + __llvm_prf_vnds_start = .; + KEEP(*(__llvm_prf_vnds)) + PROVIDE(__stop___llvm_prf_vnds = .); + __llvm_prf_vnds_end = .; + } + + __llvm_prf_vns : ALIGN(8) { + PROVIDE(__start___llvm_prf_vns = .); + KEEP(*(__llvm_prf_vns)) + PROVIDE(__stop___llvm_prf_vns = .); + } + + __llvm_prf_vtab : ALIGN(8) { + PROVIDE(__start___llvm_prf_vtab = .); + KEEP(*(__llvm_prf_vtab)) + PROVIDE(__stop___llvm_prf_vtab = .); + } + + __llvm_prf_names : ALIGN(8) { + PROVIDE(__start___llvm_prf_names = .); + __llvm_prf_names_start = .; + KEEP(*(__llvm_prf_names)) + PROVIDE(__stop___llvm_prf_names = .); + __llvm_prf_names_end = .; + } + + __llvm_prf_bits : ALIGN(8) { + PROVIDE(__start___llvm_prf_bits = .); + KEEP(*(__llvm_prf_bits)) + PROVIDE(__stop___llvm_prf_bits = .); + } + + __llvm_prf_cnts : ALIGN(8) { + 
PROVIDE(__start___llvm_prf_cnts = .); + __llvm_prf_cnts_start = .; + KEEP(*(__llvm_prf_cnts)) + PROVIDE(__stop___llvm_prf_cnts = .); + __llvm_prf_cnts_end = .; + } + + __llvm_orderfile : ALIGN(8) { + PROVIDE(__start___llvm_orderfile = .); + KEEP(*(__llvm_orderfile)) + PROVIDE(__stop___llvm_orderfile = .); + } + +} +INSERT AFTER .tbss; diff --git a/platforms/kbootloader/src/arch/aarch64/bootargs.rs b/platforms/kbootloader/src/arch/aarch64/bootargs.rs new file mode 100644 index 0000000000000000000000000000000000000000..573a78d93218630435851fa472f81e285575953f --- /dev/null +++ b/platforms/kbootloader/src/arch/aarch64/bootargs.rs @@ -0,0 +1,36 @@ +use core::mem::MaybeUninit; + +#[repr(C, align(64))] +#[derive(Clone)] +pub struct EarlyBootArgs { + pub args: [usize; 4], + pub virt_entry: *mut (), + pub kimage_addr_lma: *mut (), + pub kimage_addr_vma: *mut (), + pub stack_top_lma: *mut (), + pub stack_top_vma: *mut (), + pub kcode_end: *mut (), + pub el: usize, + pub kliner_offset: usize, + pub page_size: usize, + pub debug: usize, +} + +impl EarlyBootArgs { + pub const fn new() -> Self { + unsafe { MaybeUninit::zeroed().assume_init() } + } + + pub fn debug(&self) -> bool { + self.debug > 0 + } +} + +impl Default for EarlyBootArgs { + fn default() -> Self { + Self::new() + } +} + +#[unsafe(link_section = ".data")] +pub static mut BOOT_ARGS: EarlyBootArgs = EarlyBootArgs::new(); diff --git a/arch/kcpu/src/aarch64/boot.rs b/platforms/kbootloader/src/arch/aarch64/el.rs similarity index 44% rename from arch/kcpu/src/aarch64/boot.rs rename to platforms/kbootloader/src/arch/aarch64/el.rs index 27ef39734d7c6e7be7b3035d4cebafe159307074..bd8de611f97696f843ce9da57df7f4fca286f253 100644 --- a/arch/kcpu/src/aarch64/boot.rs +++ b/platforms/kbootloader/src/arch/aarch64/el.rs @@ -2,10 +2,9 @@ // Copyright 2025 KylinSoft Co., Ltd. // See LICENSES for license details. -//! Helper functions to initialize the CPU states on systems bootstrapping. +//! 
AArch64 exception level switching for early boot. use aarch64_cpu::{asm::barrier, registers::*}; -use memaddr::PhysAddr; /// Swtich current exception level to EL1. /// @@ -16,8 +15,11 @@ use memaddr::PhysAddr; /// # Safety /// /// This function is unsafe as it changes the CPU mode. +#[unsafe(link_section = ".idmap.text")] pub unsafe fn switch_to_el1() { + let current_sp = aarch64_cpu::registers::SP.get(); SPSel.write(SPSel::SP::ELx); + aarch64_cpu::registers::SP.set(current_sp); SP_EL0.set(0); let current_el = CurrentEL.read(CurrentEL::EL); if current_el >= 2 { @@ -51,67 +53,7 @@ pub unsafe fn switch_to_el1() { ); SP_EL1.set(SP.get()); ELR_EL2.set(LR.get()); + barrier::isb(barrier::SY); aarch64_cpu::asm::eret(); } } - -/// Configures and enables the MMU on the current CPU. -/// -/// It first sets `MAIR_EL1`, `TCR_EL1`, `TTBR0_EL1`, `TTBR1_EL1` registers to -/// the conventional values, and then enables the MMU and caches by setting -/// `SCTLR_EL1`. -/// -/// # Safety -/// -/// This function is unsafe as it changes the address translation configuration. -pub unsafe fn init_mmu(root_paddr: PhysAddr) { - use page_table::aarch64::Arm64MemAttr as MemAttr; - - MAIR_EL1.set(MemAttr::MAIR_VALUE); - - // Enable TTBR0 and TTBR1 walks, page size = 4K, vaddr size = 48 bits, paddr size = 48 bits. 
- let tcr_flags0 = TCR_EL1::EPD0::EnableTTBR0Walks - + TCR_EL1::TG0::KiB_4 - + TCR_EL1::SH0::Inner - + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable - + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable - + TCR_EL1::T0SZ.val(16); - let tcr_flags1 = TCR_EL1::EPD1::EnableTTBR1Walks - + TCR_EL1::TG1::KiB_4 - + TCR_EL1::SH1::Inner - + TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable - + TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable - + TCR_EL1::T1SZ.val(16); - TCR_EL1.write(TCR_EL1::IPS::Bits_48 + tcr_flags0 + tcr_flags1); - barrier::isb(barrier::SY); - - // Set both TTBR0 and TTBR1 - let root_paddr = root_paddr.as_usize() as u64; - TTBR0_EL1.set(root_paddr); - TTBR1_EL1.set(root_paddr); - - // Flush the entire TLB - karch::flush_tlb(None); - - // Enable the MMU and turn on I-cache and D-cache - SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable); - // Disable SPAN - SCTLR_EL1.set(SCTLR_EL1.get() | (1 << 23)); - barrier::isb(barrier::SY); -} - -/// Initializes trap handling on the current CPU. -/// -/// In detail, it initializes the exception vector, and sets `TTBR0_EL1` to 0 to -/// block low address access. -pub fn init_trap() { - #[cfg(feature = "uspace")] - crate::userspace_common::init_exception_table(); - unsafe extern "C" { - fn exception_vector_base(); - } - unsafe { - karch::write_trap_vector_base(exception_vector_base as *const () as usize); - karch::write_user_page_table(0.into()); - } -} diff --git a/platforms/kbootloader/src/arch/aarch64/entry.rs b/platforms/kbootloader/src/arch/aarch64/entry.rs new file mode 100644 index 0000000000000000000000000000000000000000..4f2dc1680879107827f177e8c7348bd7016b764e --- /dev/null +++ b/platforms/kbootloader/src/arch/aarch64/entry.rs @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2025 KylinSoft Co., Ltd. +// See LICENSES for license details. + +//! AArch64 position-independent boot entry. +//! +//! Boot flow +//! 
--------- +//! ```text +//! _start (.head.text) +//! └─ primary_entry (.idmap.text) +//! ├─ preserve_boot_args() – save x0-x3 (DTB, …) via adrp +//! ├─ switch_to_el1() – EL3/EL2 → EL1 transition +//! ├─ enable_fp() – enable FP/SIMD +//! ├─ create_boot_page_tables() – build idmap + kernel high map +//! ├─ init_mmu() – set MAIR/TCR/TTBR, enable MMU +//! └─ __primary_switched() (virtual address) +//! ├─ zero BSS +//! └─ kplat::entry(cpu_id, dtb) +//! ``` + +use core::arch::naked_asm; + +use kbuild_config::{BOOT_STACK_SIZE, PHYS_VIRT_OFFSET}; + +use super::{el, mmu}; + +// Linux ARM64 Boot Protocol image flags. +const FLAG_LE: usize = 0b0; +const FLAG_PAGE_SIZE_4K: usize = 0b10; +const FLAG_ANY_MEM: usize = 0b1000; + +/// Boot stack for the primary CPU. +#[unsafe(link_section = ".bss.stack")] +static mut BOOT_STACK: [u8; BOOT_STACK_SIZE] = [0; BOOT_STACK_SIZE]; + +/// Storage for the boot arguments passed in x0-x3 by firmware/bootloader. +#[unsafe(link_section = ".data")] +static mut SAVED_BOOT_ARGS: [u64; 4] = [0; 4]; + +/// Linux ARM64 Boot Protocol header followed by a branch to `primary_entry`. +#[unsafe(naked)] +#[unsafe(no_mangle)] +#[unsafe(link_section = ".head.text")] +pub unsafe extern "C" fn _start() -> ! { + naked_asm!( + "add x13, x18, #0x16", // "MZ" magic (valid ARM64 no-op instruction) + "bl {entry}", // branch to kernel start + ".quad 0", // image load offset from RAM base (little-endian) + ".quad _ekernel - _start", // effective image size + ".quad {flags}", // kernel flags + ".quad 0", // reserved + ".quad 0", // reserved + ".quad 0", // reserved + ".ascii \"ARM\\x64\"", // magic number + ".long 0", // reserved (PE COFF offset) + flags = const FLAG_LE | FLAG_PAGE_SIZE_4K | FLAG_ANY_MEM, + entry = sym primary_entry, + ) +} + +/// Primary CPU early boot entry (runs before MMU is enabled). 
+/// +/// All code here is position-independent – only PC-relative addressing is +/// used for data, except for `ldr x8, =sym` literal-pool loads which +/// intentionally load the *linked* virtual address so that the `br x8` +/// after MMU-enable jumps to the correct high-virtual-address symbol. +#[unsafe(naked)] +#[unsafe(no_mangle)] +#[unsafe(link_section = ".idmap.text")] +pub unsafe extern "C" fn primary_entry() -> ! { + naked_asm!( + // Capture CPU ID from MPIDR_EL1[23:0] (Aff2|Aff1|Aff0) and DTB + // pointer before any call clobbers them. This simplified affinity + // masking follows the same convention used by other x-kernel platforms + // (e.g. aarch64-qemu-virt) and is sufficient for typical SMP + // configurations where Aff3 is zero. + "mrs x19, mpidr_el1", + "and x19, x19, #0xffffff", // CPU affinity Aff2|Aff1|Aff0 + "mov x20, x0", // save DTB physical address + + // Save firmware boot arguments (x0-x3) to SAVED_BOOT_ARGS via adrp. + "bl {preserve_boot_args}", + + // Set up the early boot stack using PC-relative addressing. + "adrp x8, {boot_stack}", + "add x8, x8, :lo12:{boot_stack}", + "add x8, x8, {boot_stack_size}", + "mov sp, x8", + + // Drop to EL1 (no-op when already at EL1). + "bl {switch_to_el1}", + + // Enable FP/SIMD so that Rust code can use float registers. + "bl {enable_fp}", + + // Build the two-level boot page tables (idmap + kernel high map). + "bl {create_boot_page_tables}", + + // Program MAIR/TCR/TTBR and enable the MMU. + "bl {init_mmu}", + + // Switch the stack pointer to its high virtual address. + "mov x8, {phys_virt_offset}", + "add sp, sp, x8", + + // Restore cpu_id and DTB for __primary_switched. + "mov x0, x19", + "mov x1, x20", + + // Jump to the virtual address of __primary_switched. + // `ldr x8, =sym` loads the *linked* VMA from the literal pool so + // that the branch targets the high-VA mapping set up above. 
+ "ldr x8, ={primary_switched}", + "blr x8", + "b .", + + preserve_boot_args = sym preserve_boot_args, + boot_stack = sym BOOT_STACK, + boot_stack_size = const BOOT_STACK_SIZE, + switch_to_el1 = sym el::switch_to_el1, + enable_fp = sym enable_fp, + create_boot_page_tables = sym mmu::create_boot_page_tables, + init_mmu = sym mmu::init_mmu, + phys_virt_offset = const PHYS_VIRT_OFFSET, + primary_switched = sym __primary_switched, + ) +} + +/// Save x0-x3 (firmware boot arguments) to [`SAVED_BOOT_ARGS`]. +/// +/// Uses PC-relative addressing so this can run before the MMU is on. +#[unsafe(naked)] +#[unsafe(no_mangle)] +#[unsafe(link_section = ".idmap.text")] +pub unsafe extern "C" fn preserve_boot_args() { + naked_asm!( + // Get the physical address of SAVED_BOOT_ARGS via adrp/add. + "adrp x8, {saved_args}", + "add x8, x8, :lo12:{saved_args}", + // Store x0..x3. + "stp x0, x1, [x8]", + "stp x2, x3, [x8, #16]", + // Full system barrier so the stores complete before the MMU is enabled. + "dmb sy", + "ret", + saved_args = sym SAVED_BOOT_ARGS, + ) +} + +/// Secondary CPU boot entry. +/// +/// Called with `x0` = top of a pre-allocated stack. +/// +/// # Safety +/// +/// Must only be called from secondary CPUs, with `x0` = top of a pre-allocated stack. +#[unsafe(naked)] +#[unsafe(no_mangle)] +#[unsafe(link_section = ".idmap.text")] +pub unsafe extern "C" fn _start_secondary() -> ! 
{ + naked_asm!( + "mrs x19, mpidr_el1", + "and x19, x19, #0xffffff", // CPU affinity Aff2|Aff1|Aff0 (see primary_entry) + "mov sp, x0", // stack passed in x0 + "bl {switch_to_el1}", + "bl {enable_fp}", + "bl {init_mmu}", + "mov x8, {phys_virt_offset}", + "add sp, sp, x8", + "mov x0, x19", // cpu_id + "ldr x8, ={entry_secondary}", + "br x8", + switch_to_el1 = sym el::switch_to_el1, + enable_fp = sym enable_fp, + init_mmu = sym mmu::init_mmu, + phys_virt_offset = const PHYS_VIRT_OFFSET, + entry_secondary = sym __secondary_switched, + ) +} + +pub unsafe extern "C" fn __secondary_switched(cpu_id: usize) { + call_kernel_entry!(SECOND_KERNEL_ENTRY, cpu_id) +} + +/// Post-MMU entry point – runs at the kernel's high virtual address. +/// +/// Zeroes BSS, then calls [`kplat::entry`] with the boot CPU id and DTB +/// physical address. +/// +/// # Safety +/// +/// Must only be called once, from [`primary_entry`], after the MMU has been +/// enabled and the stack pointer adjusted to a virtual address. +pub unsafe extern "C" fn __primary_switched(cpu_id: usize, dtb_paddr: usize) { + super::serial::boot_print_str("[boot] Entered primary switched entry\n"); + // Zero BSS. + unsafe extern "C" { + fn _sbss(); + fn _ebss(); + } + unsafe { + let bss_start = _sbss as *const () as usize; + let bss_end = _ebss as *const () as usize; + core::slice::from_raw_parts_mut(bss_start as *mut u8, bss_end - bss_start).fill(0); + } + call_kernel_entry!(PRIMARY_KERNEL_ENTRY, cpu_id, dtb_paddr) +} + +/// Enable FP/SIMD by clearing traps in `CPACR_EL1`. 
+#[unsafe(link_section = ".idmap.text")] +fn enable_fp() { + #[cfg(feature = "fp-simd")] + karch::enable_fp(); +} diff --git a/platforms/kbootloader/src/arch/aarch64/layout.rs b/platforms/kbootloader/src/arch/aarch64/layout.rs new file mode 100644 index 0000000000000000000000000000000000000000..1335f447f8fd100619576e765eaa42d9617cc822 --- /dev/null +++ b/platforms/kbootloader/src/arch/aarch64/layout.rs @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2025 KylinSoft Co., Ltd. +// See LICENSES for license details. + +/// This module defines constants related to virtual address space layout, page sizes, and provides +/// utilities for calculating page offsets and limits. +/// The constants are defined based on the architecture's virtual address space and page table +/// structure. +/// The module also calculates the virtual address ranges for kernel image and modules based on the +/// defined constants. +/// The page size is set to 4KB (1 << 12), and the virtual address space is defined as 48 bits, +/// which is common for 64-bit architectures with canonical form. The module also includes utilities for +/// calculating page offsets and limits based on the virtual address bits. +/// The constants defined in this module are used throughout the kernel for memory management, +/// paging, and virtual address calculations. 
+ +use crate::size_consts::*; + +const MODULES_VADDR: usize = _page_end(PG_VA_BITS); + +const VSIZE_P: usize = 0x10; + +const MODULES_VSIZE: usize = (1usize << PG_VA_BITS) / VSIZE_P * 0x8; + +pub const KIMAGE_VSIZE: usize = (1usize << PG_VA_BITS) / VSIZE_P; + +pub const KIMAGE_VADDR: usize = MODULES_VADDR + MODULES_VSIZE; + +pub const KLINER_OFFSET: usize = KIMAGE_VADDR + KIMAGE_VSIZE; + diff --git a/platforms/kbootloader/src/arch/aarch64/map_range.rs b/platforms/kbootloader/src/arch/aarch64/map_range.rs new file mode 100644 index 0000000000000000000000000000000000000000..6144094bdaa3ffc985a19bd41ffa44882936c4e2 --- /dev/null +++ b/platforms/kbootloader/src/arch/aarch64/map_range.rs @@ -0,0 +1,409 @@ +//! Map range +use core::cmp::min; + +use aarch64_cpu::asm::barrier::{isb, dsb, ISHST}; + +use kernel::{ + arch::arm64::{ + asm::{ + tlb::TlbFlushOps, + }, + early_debug::{early_uart_put_u64_hex, early_uart_putchar}, + mm::Arm64VaLayout, + pgtable::{ + idmap::InitIdmap, Arm64PgtableConfig, PgTableEntry, PgdirEntry, PmdEntry, PteEntry, + PtePgProt, PteTable, + }, + sysregs::Ttbr1El1, + }, + global_sym::*, + klib::string::{memcpy, memset}, + macros::{page_aligned, section_init_data, section_init_text}, + mm::page::PageConfig, + page_align, +}; + +/// map-range - Map a contiguous range of physical pages into virtual memory +/// +/// # Arguments +/// +/// * `pte` - Address of physical pointer to array of pages to allocate page tables from +/// * `pgd` - Address of physical pointer to array of pages to allocate page tables from +/// * `start` - Virtual address of the start of the range +/// * `end` - Virtual address of the end of the range (exclusive) +/// * `pa` - Physical address of the start of the range +/// * `prot` - Access permissions of the range +/// * `level` - Translation level for the mapping +/// * `tbl` - The level `level` page table to create the mappings in +/// * `may_use_cont` - Whether the use of the contiguous attribute is allowed +/// * `va_offset` - 
Offset between a physical page and its current mapping in the VA space +#[unsafe(no_mangle)] +#[section_init_text] +pub unsafe extern "C" fn map_range( + pte: &mut usize, + start: usize, + end: usize, + pa: usize, + prot: PtePgProt, + level: usize, + tbl: *mut PteEntry, + may_use_cont: bool, + va_offset: usize, +) { + // continue map mask + let mut cmask = usize::MAX; + if level == 3 { + cmask = PteTable::CONT_ENTRY_SIZE - 1; + } + + // remove type + let mut protval = PtePgProt::from_bits_truncate(prot.bits() & !PtePgProt::PTE_TYPE_MASK); + + let lshift: usize = (3 - level) * Arm64PgtableConfig::PTDESC_TABLE_SHIFT; + let lmask: usize = (PageConfig::PAGE_SIZE << lshift) - 1; + + let mut start = start & PageConfig::PAGE_MASK; + let mut pa = pa & PageConfig::PAGE_MASK; + + // Advance tbl to the entry that covers start + let mut tbl: *mut PteEntry = + unsafe { tbl.add((start >> (lshift + PageConfig::PAGE_SHIFT)) % PteTable::PTRS) }; + + // Set the right block/page bits for this level unless we are clearing the mapping + if !protval.is_empty() { + if level == 2 { + protval |= PtePgProt::from_bits_truncate(PmdEntry::PMD_TYPE_SECT); + } else { + protval |= PtePgProt::PTE_TYPE_PAGE; + } + } + + while start < end { + let next = min((start | lmask) + 1, page_align!(end)); + + if level < 2 || (level == 2 && ((start | next | pa) & lmask) != 0) { + // finer grained mapping + unsafe { + if (*tbl).is_none() { + // set tbl entry + let tbl_entry = PteEntry::new( + PteEntry::from_phys((*pte).into()).value() + | PmdEntry::PMD_TYPE_TABLE + | PmdEntry::PMD_TABLE_UXN, + ); + *tbl = tbl_entry; + // move pte to next page + *pte = ((*pte) as *mut PteEntry).add(PteTable::PTRS) as usize; + } + // map next level + map_range( + pte, + start, + next, + pa, + prot, + level + 1, + ((*tbl).to_phys().as_usize() + va_offset) as *mut PteEntry, + may_use_cont, + va_offset, + ); + } + } else { + // start a contiguous range if start and pa are suitably aligned + if ((start | pa) & cmask) == 0 && 
may_use_cont { + protval |= PtePgProt::PTE_CONT; + } + + // clear the contiguous attribute if the remaining range does not cover a contiguous block + if (end & !cmask) <= start { + protval &= !PtePgProt::PTE_CONT; + } + + // Put down a block or page mapping + let tbl_content: PteEntry = + PteEntry::new(PteEntry::from_phys(pa.into()).value() | protval.bits()); + + // set tbl entry + unsafe { + *tbl = tbl_content; + } + } + + pa += next - start; + start = next; + + // move tbl to next entry + unsafe { + tbl = tbl.add(1); + } + } +} + +#[page_aligned] +struct DevicePtes([u8; 8 * PageConfig::PAGE_SIZE]); + +#[section_init_data] +static mut DEVICE_PTES: DevicePtes = DevicePtes([0; 8 * PageConfig::PAGE_SIZE]); + +/// Create initial ID map +#[unsafe(no_mangle)] +#[section_init_text] +pub unsafe extern "C" fn create_init_idmap(pg_dir: *mut PgdirEntry, clrmask: u64) -> usize { + let mut pte = (pg_dir as usize) + PageConfig::PAGE_SIZE; + + let mut text_prot = PtePgProt::PAGE_KERNEL_ROX; + let mut data_prot = PtePgProt::PAGE_KERNEL; + let clrmask = PtePgProt::from_bits_truncate(clrmask); + text_prot &= !clrmask; + data_prot &= !clrmask; + + unsafe { + map_range( + &mut pte, + _stext as usize, + __initdata_begin as usize, + _stext as usize, + text_prot, + InitIdmap::ROOT_LEVEL, + pg_dir as *mut PteEntry, + false, + 0, + ); + map_range( + &mut pte, + __initdata_begin as usize, + _end as usize, + __initdata_begin as usize, + data_prot, + InitIdmap::ROOT_LEVEL, + pg_dir as *mut PteEntry, + false, + 0, + ); + + let device_prot = PtePgProt::PROT_DEVICE_nGnRnE; + let mut pte2 = &raw const DEVICE_PTES.0 as *mut DevicePtes as usize; + let uart_device = kernel::arch::arm64::early_debug::EARLY_UART_BASE; + map_range( + &mut pte2, + uart_device - 0x1000, + uart_device + 0x1000, + uart_device - 0x1000, + device_prot, + InitIdmap::ROOT_LEVEL, + pg_dir as *mut PteEntry, + false, + 0, + ); + } + pte +} + +#[page_aligned] +struct FdtPtes([u8; InitIdmap::EARLY_FDT_PAGE_SIZE]); + 
+#[section_init_data] +static mut FDT_PTES: FdtPtes = FdtPtes([0; InitIdmap::EARLY_FDT_PAGE_SIZE]); + +// Create fdt map +#[section_init_text] +fn map_fdt(fdt: usize) { + let efdt = fdt + InitIdmap::MAX_FDT_SIZE; + unsafe { + let mut pte = &raw const FDT_PTES.0 as *mut FdtPtes as usize; + map_range( + &mut pte, + fdt, + min(_text as usize, efdt), + fdt, + PtePgProt::PAGE_KERNEL, + InitIdmap::ROOT_LEVEL, + init_idmap_pg_dir as *mut PteEntry, + false, + 0, + ); + } + dsb(ISHST); +} + +// Create kernel map +#[section_init_text] +fn map_segment( + pg_dir: *mut PgdirEntry, + pte: &mut usize, + va_offset: usize, + start: usize, + end: usize, + prot: PtePgProt, + may_use_cont: bool, + root_level: usize, +) { + unsafe { + map_range( + pte, + ((start + va_offset) & !Arm64VaLayout::KERNNEL_VA_START) as usize, + ((end + va_offset) & !Arm64VaLayout::KERNNEL_VA_START) as usize, + start, + prot, + root_level, + pg_dir as *mut PteEntry, + may_use_cont, + 0, + ); + } +} + +// Create kernel map +#[section_init_text] +fn map_kernel(va_offset: usize) { + let rootlevel = 4 - Arm64PgtableConfig::PGTABLE_LEVELS; + let mut pte = (init_pg_dir as usize) + PageConfig::PAGE_SIZE; + let text_prot = PtePgProt::PAGE_KERNEL_ROX; + let data_prot = PtePgProt::PAGE_KERNEL; + // text segment + map_segment( + init_pg_dir as *mut PgdirEntry, + &mut pte, + va_offset, + _stext as usize, + _etext as usize, + text_prot, + true, + rootlevel, + ); + + // rodata segment: include swapper_pg_dir reserved_pg_dir + map_segment( + init_pg_dir as *mut PgdirEntry, + &mut pte, + va_offset, + __start_rodata as usize, + __inittext_begin as usize, + data_prot, + false, + rootlevel, + ); + + // init text segment + map_segment( + init_pg_dir as *mut PgdirEntry, + &mut pte, + va_offset, + __inittext_begin as usize, + __inittext_end as usize, + text_prot, + false, + rootlevel, + ); + + // init data segment + map_segment( + init_pg_dir as *mut PgdirEntry, + &mut pte, + va_offset, + __initdata_begin as usize, + 
__initdata_end as usize, + data_prot, + false, + rootlevel, + ); + + // data segment + map_segment( + init_pg_dir as *mut PgdirEntry, + &mut pte, + va_offset, + _data as usize, + _end as usize, + data_prot, + true, + rootlevel, + ); + + // map uart device + unsafe { + let device_prot = PtePgProt::PROT_DEVICE_nGnRnE; + let mut pte2 = &raw const DEVICE_PTES.0 as *mut DevicePtes as usize; + let uart_device = kernel::arch::arm64::early_debug::EARLY_UART_BASE; + map_range( + &mut pte2, + uart_device - 0x1000, + uart_device + 0x1000, + uart_device - 0x1000, + device_prot, + rootlevel, + init_pg_dir as *mut PteEntry, + false, + 0, + ); + } + + dsb(ISHST); + idmap_cpu_replace_ttbr1(init_pg_dir as usize); + // Copy the root page table to its final location + // Here swapper_pg_dir must use VA,since on stage1 idmap swapper_pg_dir + // is mapped to text segment,it does not have write permission. + // init_pg_dir can use PA because on stage1 init_pg_dir is mapped to + // data segment, it has write/read permission. + memcpy( + (swapper_pg_dir as usize + va_offset) as *mut u8, + init_pg_dir as *mut u8, + PageConfig::PAGE_SIZE, + ); + idmap_cpu_replace_ttbr1(swapper_pg_dir as usize); +} + +/// Create initial ID map +#[unsafe(no_mangle)] +#[section_init_text] +pub unsafe extern "C" fn early_map_kernel(_boot_status: usize, fdt: usize) { + map_fdt(fdt); + // clear ZERO section + memset( + __bss_start as *mut u8, + 0, + init_pg_end as usize - __bss_start as usize, + ); + let va_base = Arm64VaLayout::KIMAGE_VADDR; + let pa_base = _text as usize; + map_kernel(va_base - pa_base); +} + +#[inline(always)] +fn __idmap_cpu_set_reserved_ttbr1() { + Ttbr1El1::write_pg_dir(reserved_pg_dir as u64); + isb(); + TlbFlushOps::local_flush_tlb_all(); +} + +// Should not be called by anything else. It can only be executed from a TTBR0 mapping. 
+#[inline(always)]
+fn idmap_cpu_replace_ttbr1(ttbr1: usize) {
+    __idmap_cpu_set_reserved_ttbr1();
+    Ttbr1El1::write_pg_dir(ttbr1 as u64);
+    isb();
+}
+
+#[allow(dead_code)]
+fn dump_page_table(start_addr: usize, end_addr: usize) {
+    let mut addr = start_addr;
+    let items_per_line = 4;
+    early_uart_putchar(b'\n');
+    while addr < end_addr {
+        early_uart_put_u64_hex(addr as u64);
+        early_uart_putchar(b':');
+        early_uart_putchar(b' ');
+
+        for _i in 0..items_per_line {
+            if addr >= end_addr {
+                break;
+            }
+            let value = unsafe { core::ptr::read_volatile(addr as *const u64) };
+            early_uart_put_u64_hex(value);
+            early_uart_putchar(b' ');
+            addr += core::mem::size_of::<u64>();
+        }
+        early_uart_putchar(b'\n');
+    }
+}
+
diff --git a/platforms/kbootloader/src/arch/aarch64/mmu.rs b/platforms/kbootloader/src/arch/aarch64/mmu.rs
new file mode 100644
index 0000000000000000000000000000000000000000..7ae0778689def0b83d951ecafaf07e7028116739
--- /dev/null
+++ b/platforms/kbootloader/src/arch/aarch64/mmu.rs
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2025 KylinSoft Co., Ltd.
+// See LICENSES for license details.
+
+//! Early boot page table setup and MMU initialisation for AArch64.
+//!
+//! All code in this module that runs before the MMU is enabled lives in
+//! `.idmap.text` and uses only PC-relative addressing to obtain physical
+//! addresses of data.
+
+use aarch64_cpu::{asm::barrier, registers::*};
+use kbuild_config::{BOOT_DEVICE_MM, BOOT_NORMAL_MM};
+use memaddr::PhysAddr;
+use page_table::{
+    PageTableEntry, PagingFlags,
+    aarch64::{A64PageEntry, Arm64MemAttr},
+};
+
+/// A page-aligned wrapper used to place page table arrays in the correct
+/// linker section with the required 4 KiB alignment.
+#[repr(C, align(4096))]
+struct PageAligned<T>(T);
+
+impl<T> PageAligned<T> {
+    const fn new(val: T) -> Self {
+        Self(val)
+    }
+}
+
+impl<T, const N: usize> core::ops::Index<usize> for PageAligned<[T; N]> {
+    type Output = T;
+
+    fn index(&self, idx: usize) -> &T {
+        &self.0[idx]
+    }
+}
+
+impl<T, const N: usize> core::ops::IndexMut<usize> for PageAligned<[T; N]> {
+    fn index_mut(&mut self, idx: usize) -> &mut T {
+        &mut self.0[idx]
+    }
+}
+
+/// Level-0 boot page table (shared between TTBR0 and TTBR1).
+#[unsafe(link_section = ".data.boot_page_table")]
+static mut BOOT_PT_L0: PageAligned<[A64PageEntry; 512]> =
+    PageAligned::new([A64PageEntry::empty(); 512]);
+
+/// Level-1 page table for the identity map / kernel map.
+///
+/// Because TTBR0 and TTBR1 share the same L0 table, a single L1 table
+/// covers both the low (identity) and high (virtual kernel) windows.
+#[unsafe(link_section = ".data.boot_page_table")]
+static mut BOOT_PT_L1: PageAligned<[A64PageEntry; 512]> =
+    PageAligned::new([A64PageEntry::empty(); 512]);
+
+/// Build the minimal boot page tables required to switch the MMU on.
+///
+/// The two tables form a two-level walk:
+///
+/// ```text
+/// TTBR0/TTBR1 → BOOT_PT_L0[0] → BOOT_PT_L1
+/// For each (start, end) in BOOT_DEVICE_MM: L1[start/1GiB .. end/1GiB] → Device RW
+/// For each (start, end) in BOOT_NORMAL_MM: L1[start/1GiB .. end/1GiB] → Normal RWX
+/// ```
+///
+/// Both `BOOT_DEVICE_MM` and `BOOT_NORMAL_MM` are read from `kbuild_config`
+/// (populated from the platform defconfig). Every interval in these slices
+/// **must** be 1 GiB-aligned (`start` and `end` are multiples of
+/// `0x4000_0000`). A single L1 table covers the first 512 GiB, which is
+/// sufficient for all currently supported boot scenarios.
+///
+/// The same L0 table is used for both TTBR0 (low addresses) and TTBR1
+/// (high addresses) because the kernel virtual base has the same L0/L1
+/// indices as its physical address when the top 16 bits are masked off.
+///
+/// # Safety
+///
+/// Must be called before the MMU is enabled. 
All memory accesses use +/// physical addresses obtained via `adrp`/`add` (PC-relative). +#[unsafe(link_section = ".idmap.text")] +pub unsafe fn create_boot_page_tables() { + const GIB: usize = 0x4000_0000; + + let l1_pa: usize; + + unsafe { + core::arch::asm!( + "adrp {out}, {sym}", + "add {out}, {out}, :lo12:{sym}", + sym = sym BOOT_PT_L1, + out = out(reg) l1_pa, + options(pure, nomem, nostack), + ); + } + + // L0[0] → L1 table (covers the first 512 GiB) + unsafe { + BOOT_PT_L0[0] = A64PageEntry::new_table(PhysAddr::from(l1_pa)); + } + + // Map each platform-defined device memory interval as Device RW 1 GiB blocks. + for &(start, end) in BOOT_DEVICE_MM { + let mut addr = start; + while addr < end { + unsafe { + BOOT_PT_L1[addr / GIB] = A64PageEntry::new_page( + PhysAddr::from(addr), + PagingFlags::READ | PagingFlags::WRITE | PagingFlags::DEVICE, + true, // 1 GiB block + ); + } + addr += GIB; + } + } + + // Map each platform-defined normal memory interval as Normal RWX 1 GiB blocks. + for &(start, end) in BOOT_NORMAL_MM { + let mut addr = start; + while addr < end { + unsafe { + BOOT_PT_L1[addr / GIB] = A64PageEntry::new_page( + PhysAddr::from(addr), + PagingFlags::READ | PagingFlags::WRITE | PagingFlags::EXECUTE, + true, // 1 GiB block + ); + } + addr += GIB; + } + } + + // Ensure all page table writes are visible before enabling the MMU. + barrier::dsb(barrier::SY); +} + +/// Configure MMU registers and enable the MMU. +/// +/// Sets `MAIR_EL1`, `TCR_EL1`, `TTBR0_EL1`, `TTBR1_EL1` and then turns +/// the MMU on via `SCTLR_EL1`. +/// +/// # Safety +/// +/// Must be called after [`create_boot_page_tables`] and before any code +/// that relies on virtual addresses. +#[unsafe(link_section = ".idmap.text")] +pub unsafe fn init_mmu() { + // Obtain physical address of L0 page table via PC-relative addressing. 
+ let root_pa: usize; + unsafe { + core::arch::asm!( + "adrp {out}, {sym}", + "add {out}, {out}, :lo12:{sym}", + sym = sym BOOT_PT_L0, + out = out(reg) root_pa, + options(pure, nomem, nostack), + ); + } + + // Program memory attributes. + MAIR_EL1.set(Arm64MemAttr::MAIR_VALUE); + + // Configure TCR_EL1: 4 KiB granule, 48-bit VA, 48-bit PA, inner-shareable + // write-back cacheable walks for both TTBR0 (T0SZ=16) and TTBR1 (T1SZ=16). + let tcr_flags0 = TCR_EL1::EPD0::EnableTTBR0Walks + + TCR_EL1::TG0::KiB_4 + + TCR_EL1::SH0::Inner + + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::T0SZ.val(16); + let tcr_flags1 = TCR_EL1::EPD1::EnableTTBR1Walks + + TCR_EL1::TG1::KiB_4 + + TCR_EL1::SH1::Inner + + TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::T1SZ.val(16); + TCR_EL1.write(TCR_EL1::IPS::Bits_48 + tcr_flags0 + tcr_flags1); + barrier::isb(barrier::SY); + + // Point both TTBR0 and TTBR1 at the same L0 table so that low (identity) + // and high (kernel) virtual addresses are both accessible right after the + // MMU is enabled. + let root_pa_u64 = root_pa as u64; + TTBR0_EL1.set(root_pa_u64); + TTBR1_EL1.set(root_pa_u64); + + // Flush the entire TLB before enabling the MMU. + karch::flush_tlb(None); + + // Enable the MMU and turn on I-cache and D-cache. 
+ SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable); + // Disable SPAN + SCTLR_EL1.set(SCTLR_EL1.get() | (1 << 23)); + barrier::isb(barrier::SY); + + super::serial::boot_print_str("[boot] MMU enabled, root page table at PA "); + super::serial::boot_print_usize(root_pa); + super::serial::boot_print_str("\r\n"); + unsafe extern "C" { + fn _start(); + } + let start_pa: usize = _start as *const () as _; + super::serial::boot_print_str("start PA is "); + super::serial::boot_print_usize(start_pa); + super::serial::boot_print_str("\r\n"); +} diff --git a/platforms/kbootloader/src/arch/aarch64/mod.rs b/platforms/kbootloader/src/arch/aarch64/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..8ffa76914a0b5bf1886194e0fa3ede25d68ed17d --- /dev/null +++ b/platforms/kbootloader/src/arch/aarch64/mod.rs @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2025 KylinSoft Co., Ltd. +// See LICENSES for license details. + +mod el; +mod entry; +pub use entry::_start_secondary; + +mod mmu; +mod serial; diff --git a/platforms/aarch64-crosvm-virt/src/serial.rs b/platforms/kbootloader/src/arch/aarch64/serial.rs similarity index 89% rename from platforms/aarch64-crosvm-virt/src/serial.rs rename to platforms/kbootloader/src/arch/aarch64/serial.rs index d999d3949d06ca0a4a679648d0531c2cec0e7f5f..a5848b88657b2c6482726a161da9f6aa95991aeb 100644 --- a/platforms/aarch64-crosvm-virt/src/serial.rs +++ b/platforms/kbootloader/src/arch/aarch64/serial.rs @@ -10,15 +10,15 @@ pub extern "C" fn _boot_print_usize(num: usize) { let mut cnt = 0; boot_print_str("0x"); if num == 0 { - boot_serial_send('0' as u8); + boot_serial_send(b'0'); } else { loop { if num == 0 { break; } msg[cnt] = match (num & 0xf) as u8 { - n if n < 10 => n + '0' as u8, - n => n - 10 + 'a' as u8, + n if n < 10 => n + b'0', + n => n - 10 + b'a', }; cnt += 1; num >>= 4; @@ -29,7 +29,9 @@ pub extern "C" fn _boot_print_usize(num: usize) { } 
boot_print_str("\r\n"); } + #[unsafe(no_mangle)] +#[unsafe(link_section = ".idmap.text")] /// Write a string to the boot UART. pub fn boot_print_str(data: &str) { for byte in data.bytes() { @@ -46,13 +48,16 @@ pub fn boot_print_usize(num: usize) { pub struct Uart { base_address: usize, } + impl Uart { /// Create a UART instance backed by an MMIO base address. + #[unsafe(link_section = ".idmap.text")] pub const fn new(base_address: usize) -> Self { Self { base_address } } /// Write a byte to the UART TX register. + #[unsafe(link_section = ".idmap.text")] pub fn put(&self, c: u8) -> Option { let ptr = self.base_address as *mut u8; unsafe { @@ -61,7 +66,9 @@ impl Uart { Some(c) } } -static BOOT_SERIAL: Uart = Uart::new(0x3f8); + +// change this to adapt to different hardware. The default is the standard PC COM1 port. +static BOOT_SERIAL: Uart = Uart::new(kbuild_config::UART_PADDR); #[allow(dead_code)] pub fn print_el1_reg(switch: bool) { if !switch { @@ -113,7 +120,9 @@ macro_rules! boot_print_reg { boot_print_usize(reg); }; } + #[allow(unused)] +#[unsafe(link_section = ".idmap.text")] /// Send a single byte to the boot UART. pub fn boot_serial_send(data: u8) { unsafe { BOOT_SERIAL.put(data) }; diff --git a/platforms/kbootloader/src/arch/mod.rs b/platforms/kbootloader/src/arch/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..662e66274bf12d6a4c45c6ed349c7b28506c3d4c --- /dev/null +++ b/platforms/kbootloader/src/arch/mod.rs @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2025 KylinSoft Co., Ltd. +// See LICENSES for license details. 
+ +#[cfg(target_arch = "aarch64")] +pub mod aarch64; +#[cfg(target_arch = "aarch64")] +pub use self::aarch64::*; + +#[cfg(target_arch = "x86_64")] +pub mod x86_64; +#[cfg(target_arch = "x86_64")] +pub use self::x86_64::*; diff --git a/platforms/x86-csv/src/ap_start.S b/platforms/kbootloader/src/arch/x86_64/ap_start.S similarity index 100% rename from platforms/x86-csv/src/ap_start.S rename to platforms/kbootloader/src/arch/x86_64/ap_start.S diff --git a/platforms/kbootloader/src/arch/x86_64/entry.rs b/platforms/kbootloader/src/arch/x86_64/entry.rs new file mode 100644 index 0000000000000000000000000000000000000000..a36279adcc6b9d944d49c448987f780863116142 --- /dev/null +++ b/platforms/kbootloader/src/arch/x86_64/entry.rs @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2025 KylinSoft Co., Ltd. +// See LICENSES for license details. + +//! x86_64 Multiboot boot entry for kbootloader. +//! +//! Boot flow +//! --------- +//! ```text +//! _start (.text.boot, 32-bit multiboot entry) +//! └─ bsp_entry32 – set GDT, CR4, page tables, EFER, CR0, jump to 64-bit +//! └─ bsp_entry64 +//! └─ rust_entry(magic, mbi) +//! └─ PRIMARY_KERNEL_ENTRY(cpu_id, mbi) +//! +//! ap_entry32 (.text.boot, AP 32-bit entry from ap_start.S trampoline) +//! └─ ap_entry64 +//! └─ rust_entry_secondary(magic) +//! └─ SECOND_KERNEL_ENTRY(cpu_id) +//! 
``` + +use core::arch::global_asm; + +use kbuild_config::{BOOT_STACK_SIZE, PHYS_VIRT_OFFSET, SEV_CBIT_POS}; +use x86_64::registers::{ + control::{Cr0Flags, Cr4Flags}, + model_specific::EferFlags, +}; + +const MULTIBOOT_HEADER_FLAGS: usize = 0x0001_0002; +const MULTIBOOT_HEADER_MAGIC: usize = 0x1BADB002; +pub const MULTIBOOT_BOOTLOADER_MAGIC: usize = 0x2BADB002; + +const CR0: u64 = Cr0Flags::PROTECTED_MODE_ENABLE.bits() + | Cr0Flags::MONITOR_COPROCESSOR.bits() + | Cr0Flags::NUMERIC_ERROR.bits() + | Cr0Flags::WRITE_PROTECT.bits() + | Cr0Flags::PAGING.bits(); + +const CR4: u64 = Cr4Flags::PHYSICAL_ADDRESS_EXTENSION.bits() + | Cr4Flags::PAGE_GLOBAL.bits() + | if cfg!(feature = "fp-simd") { + Cr4Flags::OSFXSR.bits() | Cr4Flags::OSXMMEXCPT_ENABLE.bits() + } else { + 0 + }; + +const EFER: u64 = EferFlags::LONG_MODE_ENABLE.bits() | EferFlags::NO_EXECUTE_ENABLE.bits(); + +/// AMD SEV / CSV C-bit mask for page table entries. +/// Set to `1 << SEV_CBIT_POS` when SEV/CSV is active; zero otherwise. +/// Use `SEV_CBIT_POS=0` in defconfig to disable (qemu-virt). +pub const SEV_CBIT_MASK: u64 = if SEV_CBIT_POS == 0 { + 0 +} else { + 1u64 << SEV_CBIT_POS +}; + +/// Page index of the AP real-mode startup page (physical address = index × 4 KiB). +/// +/// Both x86_64 platforms use page 6 (0x6000). Exported so `mp.rs` can derive +/// the physical address and the SIPI vector from a single source of truth. +pub const AP_START_PAGE_IDX: u8 = 6; +pub const AP_START_PAGE_PADDR: usize = AP_START_PAGE_IDX as usize * 0x1000; + +/// Boot stack for the primary CPU. 
+#[unsafe(link_section = ".bss.stack")] +static mut BOOT_STACK: [u8; BOOT_STACK_SIZE] = [0; BOOT_STACK_SIZE]; + +global_asm!( + include_str!("ap_start.S"), + start_page_paddr = const AP_START_PAGE_PADDR, +); + +global_asm!( + include_str!("multiboot.S"), + mb_magic = const MULTIBOOT_BOOTLOADER_MAGIC, + mb_hdr_magic = const MULTIBOOT_HEADER_MAGIC, + mb_hdr_flags = const MULTIBOOT_HEADER_FLAGS, + entry = sym rust_entry, + entry_secondary = sym rust_entry_secondary, + offset = const PHYS_VIRT_OFFSET, + boot_stack_size = const BOOT_STACK_SIZE, + boot_stack = sym BOOT_STACK, + cr0 = const CR0, + cr4 = const CR4, + efer_msr = const x86::msr::IA32_EFER, + efer = const EFER, + cbit_mask = const SEV_CBIT_MASK, +); + +/// Read the initial APIC ID from CPUID leaf 1 (bits [31:24] of EBX). +/// Used as the logical CPU ID, matching the convention of both x86 platforms. +fn get_cpu_id() -> usize { + // rbx is reserved by LLVM; save/restore it around the CPUID instruction. + let ebx_val: u32; + unsafe { + core::arch::asm!( + "push rbx", + "cpuid", + "mov {:e}, ebx", + "pop rbx", + out(reg) ebx_val, + inout("eax") 1u32 => _, + out("ecx") _, + out("edx") _, + options(nostack, preserves_flags), + ); + } + ((ebx_val >> 24) & 0xff) as usize +} + +/// Primary CPU C entry: validates multiboot magic, then dispatches via +/// [`PRIMARY_KERNEL_ENTRY`](crate::PRIMARY_KERNEL_ENTRY). +#[unsafe(no_mangle)] +unsafe extern "C" fn rust_entry(magic: usize, mbi: usize) { + if magic == MULTIBOOT_BOOTLOADER_MAGIC { + call_kernel_entry!(PRIMARY_KERNEL_ENTRY, get_cpu_id(), mbi) + } + loop { + unsafe { core::arch::asm!("hlt", options(nostack, nomem)) } + } +} + +/// Secondary CPU C entry: dispatches via +/// [`SECOND_KERNEL_ENTRY`](crate::SECOND_KERNEL_ENTRY). 
+#[unsafe(no_mangle)] +unsafe extern "C" fn rust_entry_secondary(_magic: usize) { + if _magic == MULTIBOOT_BOOTLOADER_MAGIC { + call_kernel_entry!(SECOND_KERNEL_ENTRY, get_cpu_id()) + } + loop { + unsafe { core::arch::asm!("hlt", options(nostack, nomem)) } + } +} diff --git a/platforms/kbootloader/src/arch/x86_64/mod.rs b/platforms/kbootloader/src/arch/x86_64/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..b66342e97b359ea2b91a4605ee3f9af1636a8926 --- /dev/null +++ b/platforms/kbootloader/src/arch/x86_64/mod.rs @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2025 KylinSoft Co., Ltd. +// See LICENSES for license details. + +mod entry; + +pub use entry::{AP_START_PAGE_IDX, AP_START_PAGE_PADDR, MULTIBOOT_BOOTLOADER_MAGIC}; diff --git a/platforms/x86-csv/src/multiboot.S b/platforms/kbootloader/src/arch/x86_64/multiboot.S similarity index 91% rename from platforms/x86-csv/src/multiboot.S rename to platforms/kbootloader/src/arch/x86_64/multiboot.S index f2938838875d6e6d2f0bd9133a9a886afbb83fbb..9f3cf3bebcf7173c6f77589392ed5079b263ee05 100644 --- a/platforms/x86-csv/src/multiboot.S +++ b/platforms/kbootloader/src/arch/x86_64/multiboot.S @@ -1,5 +1,9 @@ # Bootstrapping from 32-bit with the Multiboot specification. # See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html +# +# Unified multiboot entry for x86_64-qemu-virt and x86-csv. +# The {cbit_mask} argument is 0 for non-SEV platforms and (1 << SEV_CBIT_POS) +# for CSV/SEV platforms such as x86-csv. .section .text.boot .code32 @@ -19,7 +23,7 @@ multiboot_header: .int _skernel - {offset} # load_addr .int _edata - {offset} # load_end .int _ebss - {offset} # bss_end_addr - .int _start32 - {offset} # entry_addr + .int _start32 - {offset} # entry_addr (32-bit multiboot entry for GRUB/QEMU) # Common code in 32-bit, prepare states to enter 64-bit. 
.macro ENTRY32_COMMON @@ -79,7 +83,6 @@ _start: jmp bsp_entry64 .code64 -.global bsp_entry64 bsp_entry64: ENTRY64_COMMON @@ -138,13 +141,13 @@ ap_entry64: .Ltmp_pdpt_low: .set i, 0 .rept 512 - .quad 0x40000000 * i | 0x83 | {cbit_mask} # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000 * i) + .quad 0x40000000 * i | 0x83 | {cbit_mask} # PRESENT | WRITABLE | HUGE_PAGE | paddr | C-bit .set i, i + 1 .endr .Ltmp_pdpt_high: .set i, 0 .rept 512 - .quad 0x40000000 * i | 0x83 | {cbit_mask} # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000 * i) + .quad 0x40000000 * i | 0x83 | {cbit_mask} # PRESENT | WRITABLE | HUGE_PAGE | paddr | C-bit .set i, i + 1 .endr diff --git a/platforms/kbootloader/src/bootinfo.rs b/platforms/kbootloader/src/bootinfo.rs new file mode 100644 index 0000000000000000000000000000000000000000..68960c13fb347c43ccfd80cc3ad8578fcfe411d1 --- /dev/null +++ b/platforms/kbootloader/src/bootinfo.rs @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2025 KylinSoft Co., Ltd. +// See LICENSES for license details. + +//! Unified boot information structure for all architectures and boot protocols. +//! +//! This module provides a standardized interface between bootloaders and the kernel, +//! abstracting away the details of different boot protocols (Multiboot, UEFI, Device Tree, etc.). +//! +//! # Design Principles +//! +//! - **Architecture Agnostic**: Works on x86_64, aarch64, riscv64, loongarch64 +//! - **Protocol Agnostic**: Supports Multiboot, UEFI, OpenSBI, U-Boot, etc. +//! - **FFI Safe**: Uses `#[repr(C)]` for cross-language compatibility +//! - **Extensible**: Version field allows future expansion +//! +//! # Usage +//! +//! ## Bootloader Side (construct BootInfo) +//! +//! ```rust,ignore +//! let boot_info = BootInfo::new(BootProtocol::Multiboot1) +//! .with_dtb(dtb_addr) +//! .with_cpu_id(0); +//! ``` +//! +//! ## Kernel Side (consume BootInfo) +//! +//! ```rust,ignore +//! pub fn entry(boot_info: &'static BootInfo) { +//! 
assert!(boot_info.is_valid()); +//! // ... initialize kernel +//! } +//! ``` + +use core::fmt; + +/// Magic number for BootInfo structure validation. +/// +/// ASCII: "BOOTINFO" = 0x424f4f54494e464f +const BOOT_INFO_MAGIC: u64 = 0x424f_4f54_494e_464f; + +/// Current BootInfo structure version. +/// +/// Increment this when making **incompatible** changes. +/// Bootloader and kernel must have matching major version. +const BOOT_INFO_VERSION: u32 = 1; + +/// Unified boot information passed from bootloader to kernel. +/// +/// # Memory Layout +/// +/// This structure is designed to be placed in a known memory location +/// or passed via register (depending on architecture): +/// +/// - **x86_64**: Address in `rdi` register +/// - **aarch64**: Address in `x0` register +/// - **riscv64**: Address in `a0` register +/// - **loongarch64**: Address in `$a0` register +/// +/// # Lifetime +/// +/// The BootInfo and all referenced data (strings, etc.) +/// must remain valid for the entire kernel lifetime. +#[repr(C)] +#[derive(Clone, Copy)] +pub struct BootInfo { + /// Magic number for structure validation. + /// Must be [`BOOT_INFO_MAGIC`]. + pub magic: u64, + + /// Structure version. Must match [`BOOT_INFO_VERSION`]. + pub version: u32, + + /// Reserved for future use. Must be 0. + pub _reserved: u32, + + /// Boot protocol used by the bootloader. + pub protocol: BootProtocol, + + /// Architecture-specific flags (reserved). + pub arch_flags: u32, + + /// Kernel physical load address (where bootloader placed the kernel). + /// + /// This is the **actual** physical address, not the linked address. + /// Kernel can use this to calculate relocation offset. + pub kernel_load_paddr: usize, + + /// Kernel virtual address offset (phys_virt_offset). 
+    ///
+    /// For higher-half kernels: `virt_addr = phys_addr + phys_virt_offset`
+    ///
+    /// Example:
+    /// - x86_64: `0xffff_8000_0000_0000`
+    /// - aarch64: `0xffff_0000_0000_0000`
+    /// - riscv64: `0xffff_ffc0_0000_0000` (sv39)
+    pub phys_virt_offset: usize,
+
+    /// Device Tree Blob (DTB) physical address, if available.
+    ///
+    /// Set to 0 if not provided (e.g., x86_64 BIOS/UEFI without DTB).
+    pub dtb_addr: usize,
+
+    /// ACPI RSDP (Root System Description Pointer) address, if available.
+    ///
+    /// Set to 0 if not provided (e.g., device tree platforms).
+    pub rsdp_addr: usize,
+
+    /// Initial RAM disk (initrd/initramfs) physical address.
+    ///
+    /// Set to 0 if no ramdisk provided.
+    pub ramdisk_addr: usize,
+
+    /// Ramdisk size in bytes.
+    pub ramdisk_size: usize,
+
+    /// Command line string physical address (null-terminated).
+    ///
+    /// Set to 0 if no command line provided.
+    pub cmdline_addr: usize,
+
+    /// Command line string length (excluding null terminator).
+    pub cmdline_len: usize,
+
+    /// Boot CPU ID (MPIDR on ARM, APIC ID on x86, Hart ID on RISC-V).
+    pub cpu_id: usize,
+
+    /// Total number of CPU cores detected by bootloader.
+    ///
+    /// May be 0 if unknown. Kernel should probe actual count.
+    pub cpu_count: usize,
+
+    /// Framebuffer information (if graphics available).
+    pub framebuffer: Option<FrameBufferInfo>,
+}
+
+impl BootInfo {
+    /// Creates a new BootInfo with the given boot protocol.
+    ///
+    /// All optional fields are initialized to safe default values (0 or None).
+    pub const fn new(protocol: BootProtocol) -> Self {
+        Self {
+            magic: BOOT_INFO_MAGIC,
+            version: BOOT_INFO_VERSION,
+            _reserved: 0,
+            protocol,
+            arch_flags: 0,
+            kernel_load_paddr: 0,
+            phys_virt_offset: 0,
+            dtb_addr: 0,
+            rsdp_addr: 0,
+            ramdisk_addr: 0,
+            ramdisk_size: 0,
+            cmdline_addr: 0,
+            cmdline_len: 0,
+            cpu_id: 0,
+            cpu_count: 0,
+            framebuffer: None,
+        }
+    }
+
+    /// Validates the BootInfo structure.
+ /// + /// # Returns + /// + /// - `true` if magic and version are correct + /// - `false` otherwise (corrupted or incompatible) + #[inline] + pub const fn is_valid(&self) -> bool { + self.magic == BOOT_INFO_MAGIC && self.version == BOOT_INFO_VERSION + } + + /// Returns the boot protocol used. + #[inline] + pub const fn protocol(&self) -> BootProtocol { + self.protocol + } + + /// Returns the command line string, if provided. + /// + /// # Returns + /// + /// - `Some(&str)` if cmdline is valid UTF-8 + /// - `None` if not provided or invalid + pub fn cmdline(&self) -> Option<&str> { + if self.cmdline_addr == 0 || self.cmdline_len == 0 { + return None; + } + + unsafe { + let slice = + core::slice::from_raw_parts(self.cmdline_addr as *const u8, self.cmdline_len); + core::str::from_utf8(slice).ok() + } + } + + /// Builder pattern: set DTB address. + #[inline] + pub const fn with_dtb(mut self, addr: usize) -> Self { + self.dtb_addr = addr; + self + } + + /// Builder pattern: set RSDP address. + #[inline] + pub const fn with_rsdp(mut self, addr: usize) -> Self { + self.rsdp_addr = addr; + self + } + + /// Builder pattern: set ramdisk. + #[inline] + pub const fn with_ramdisk(mut self, addr: usize, size: usize) -> Self { + self.ramdisk_addr = addr; + self.ramdisk_size = size; + self + } + + /// Builder pattern: set CPU ID. + #[inline] + pub const fn with_cpu_id(mut self, id: usize) -> Self { + self.cpu_id = id; + self + } + + /// Builder pattern: set kernel load address. + #[inline] + pub const fn with_kernel_load_paddr(mut self, addr: usize) -> Self { + self.kernel_load_paddr = addr; + self + } + + /// Builder pattern: set phys_virt_offset. 
+ #[inline] + pub const fn with_phys_virt_offset(mut self, offset: usize) -> Self { + self.phys_virt_offset = offset; + self + } +} + +impl fmt::Debug for BootInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BootInfo") + .field("magic", &format_args!("{:#x}", self.magic)) + .field("version", &self.version) + .field("protocol", &self.protocol) + .field( + "kernel_load_paddr", + &format_args!("{:#x}", self.kernel_load_paddr), + ) + .field( + "phys_virt_offset", + &format_args!("{:#x}", self.phys_virt_offset), + ) + .field("dtb_addr", &format_args!("{:#x}", self.dtb_addr)) + .field("rsdp_addr", &format_args!("{:#x}", self.rsdp_addr)) + .field( + "ramdisk", + &format_args!( + "{:#x}..{:#x}", + self.ramdisk_addr, + self.ramdisk_addr + self.ramdisk_size + ), + ) + .field("cpu_id", &self.cpu_id) + .field("cpu_count", &self.cpu_count) + .finish() + } +} + +// ===== Boot Protocol Types ===== + +/// Boot protocol identifier. +/// +/// Indicates which firmware/bootloader interface was used. +#[repr(u8)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum BootProtocol { + /// Unknown/unspecified protocol. + Unknown = 0, + + /// Multiboot v1 (used by GRUB legacy). + Multiboot1 = 1, + + /// Multiboot v2 (modern GRUB). + Multiboot2 = 2, + + /// UEFI Boot Services (x86_64, aarch64). + Uefi = 3, + + /// Device Tree (ARM, RISC-V, LoongArch). + DeviceTree = 4, + + /// Linux Boot Protocol (x86_64). + LinuxBoot = 5, + + /// OpenSBI (RISC-V). + OpenSBI = 6, + + /// U-Boot (ARM, RISC-V). + UBoot = 7, + + /// BIOS (legacy x86). + Bios = 8, +} + +/// Pixel format for framebuffer. +#[repr(u8)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PixelFormat { + /// RGB (red, green, blue). + Rgb = 0, + + /// BGR (blue, green, red). + Bgr = 1, + + /// Grayscale. + Grayscale = 2, +} + +/// Framebuffer configuration. +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub struct FrameBufferInfo { + /// Physical address of framebuffer. 
+    pub addr: usize,
+
+    /// Width in pixels.
+    pub width: u32,
+
+    /// Height in pixels.
+    pub height: u32,
+
+    /// Pitch/stride in bytes (bytes per scanline).
+    pub pitch: u32,
+
+    /// Bits per pixel.
+    pub bpp: u16,
+
+    /// Pixel format.
+    pub format: PixelFormat,
+
+    /// Reserved.
+    pub _reserved: u8,
+}
+
+// ===== Safety Assertions =====
+
+// Ensure FFI safety and layout stability
+const _: () = {
+    assert!(core::mem::size_of::<BootInfo>().is_multiple_of(8));
+    assert!(core::mem::align_of::<BootInfo>() == 8);
+    assert!(core::mem::size_of::<BootProtocol>() == 1);
+};
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_bootinfo_creation() {
+        let boot_info = BootInfo::new(BootProtocol::Multiboot1);
+        assert!(boot_info.is_valid());
+        assert_eq!(boot_info.protocol(), BootProtocol::Multiboot1);
+    }
+
+    #[test]
+    fn test_bootinfo_builder() {
+        let boot_info = BootInfo::new(BootProtocol::Uefi)
+            .with_dtb(0x8000000)
+            .with_cpu_id(0);
+
+        assert_eq!(boot_info.dtb_addr, 0x8000000);
+        assert_eq!(boot_info.cpu_id, 0);
+    }
+
+    #[test]
+    fn test_bootinfo_layout() {
+        use core::mem::{align_of, size_of};
+        // BootInfo must be 8-byte aligned
+        assert_eq!(align_of::<BootInfo>(), 8);
+        // Size must be a multiple of 8 (for FFI convenience)
+        assert_eq!(size_of::<BootInfo>() % 8, 0);
+        // The protocol enum must be exactly 1 byte
+        assert_eq!(size_of::<BootProtocol>(), 1);
+        println!("BootInfo size: {} bytes", size_of::<BootInfo>());
+    }
+
+    #[test]
+    fn test_bootinfo_defaults() {
+        let info = BootInfo::new(BootProtocol::Multiboot1);
+        assert!(info.is_valid());
+        assert_eq!(info.dtb_addr, 0);
+        assert_eq!(info.rsdp_addr, 0);
+    }
+}
diff --git a/platforms/kbootloader/src/lib.rs b/platforms/kbootloader/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..668dc44ca0792bff87da4b4153b817141d870de5
--- /dev/null
+++ b/platforms/kbootloader/src/lib.rs
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2025 KylinSoft Co., Ltd.
+// See LICENSES for license details.
+
+//! Unified position-independent boot layer for x-kernel (AArch64 support).
+
+#![cfg_attr(target_os = "none", no_std)]
+#![cfg(target_os = "none")]
+
+pub use linkme::{distributed_slice as def_boot_init, distributed_slice as register_boot_init};
+
+#[def_boot_init]
+pub static PRIMARY_KERNEL_ENTRY: [fn(usize, usize) -> !];
+
+#[def_boot_init]
+pub static SECOND_KERNEL_ENTRY: [fn(usize) -> !];
+
+macro_rules! call_kernel_entry {
+    ($entry:ident, $($args:tt)*) => {{
+        let mut iter = $crate::$entry.iter();
+        if let Some(func) = iter.next() {
+            func($($args)*)
+        }
+    }}
+}
+
+pub mod arch;
+pub mod bootinfo;
+pub mod size_const;
diff --git a/platforms/kbootloader/src/page_table_generic/mod.rs b/platforms/kbootloader/src/page_table_generic/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f67b3eecf100b319628cd58a81dbc062b470967d
--- /dev/null
+++ b/platforms/kbootloader/src/page_table_generic/mod.rs
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2025 KylinSoft Co., Ltd.
+// See LICENSES for license details.
+
+mod addr;
+mod iter;
+mod table;
+use core::{alloc::Layout, fmt::Debug};
+
+use crate::size_const::*;
+pub use addr::*;
+pub use table::{MapConfig, PageTableRef};
+
+
+#[derive(Debug, Clone, Copy)]
+pub struct PTEInfo<P: PTEGeneric> {
+    pub level: usize,
+    pub vaddr: VirtAddr,
+    pub pte: P,
+}
+
+pub trait TableGeneric: Sync + Send + Clone + Copy + 'static {
+    type PTE: PTEGeneric;
+
+    const PAGE_SIZE: usize = 0x1000;
+    const LEVEL: usize = 4;
+    const VALID_BITS: usize = 12 + Self::LEVEL * 9;
+    // Highest level at which block (huge-page) mappings are supported
+    const MAX_BLOCK_LEVEL: usize = 3;
+    const TABLE_LEN: usize = Self::PAGE_SIZE / core::mem::size_of::<Self::PTE>();
+    fn flush(vaddr: Option<VirtAddr>);
+}
+
+pub trait PTEGeneric: Debug + Sync + Send + Clone + Copy + Sized + 'static {
+    fn valid(&self) -> bool;
+    fn paddr(&self) -> PhysAddr;
+    fn set_paddr(&mut self, paddr: PhysAddr);
+    fn set_valid(&mut self, valid: bool);
+    fn is_huge(&self) -> bool;
+    fn set_is_huge(&mut self, b: bool);
+}
+
+pub trait Access {
+    /// Alloc memory for a page table entry.
+    ///
+    /// # Safety
+    ///
+    /// should be deallocated by [`dealloc`].
+    unsafe fn alloc(&mut self, layout: Layout) -> Option<PhysAddr>;
+    /// dealloc memory for a page table entry.
+    ///
+    /// # Safety
+    ///
+    /// ptr must be allocated by [`alloc`].
+    unsafe fn dealloc(&mut self, ptr: PhysAddr, layout: Layout);
+
+    fn phys_to_mut(&self, phys: PhysAddr) -> *mut u8;
+}
+
+use thiserror::Error;
+
+/// The error type for page table operation failures.
+#[derive(Error, Debug, PartialEq, Eq)]
+pub enum PagingError {
+    #[error("can't allocate memory")]
+    NoMemory,
+    #[error("{0} is not aligned")]
+    NotAligned(&'static str),
+    #[error("not mapped")]
+    NotMapped,
+    #[error("already mapped")]
+    AlreadyMapped,
+}
+
+/// The specialized `Result` type for page table operations.
+pub type PagingResult<T = ()> = Result<T, PagingError>;
+
diff --git a/platforms/kbootloader/src/size_const.rs b/platforms/kbootloader/src/size_const.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6eee5542c701815ca0ca2544563f52c79809c374
--- /dev/null
+++ b/platforms/kbootloader/src/size_const.rs
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2025 KylinSoft Co., Ltd.
+// See LICENSES for license details.
+ +pub const SZ_1M: usize = 1024 * 1024; +pub const SZ_2M: usize = 2 * SZ_1M; +pub const SZ_8M: usize = 8 * SZ_1M; +pub const SZ_16M: usize = 16 * SZ_1M; +pub const SZ_1G: usize = 1024 * SZ_1M; +pub const SZ_2G: usize = 2 * SZ_1G; + +pub const KB: usize = 1024; +pub const MB: usize = 1024 * KB; +pub const GB: usize = 1024 * MB; diff --git a/platforms/kplat-macros/src/lib.rs b/platforms/kplat-macros/src/lib.rs index b7188995c68720ba14b0f69a18125a0ee5ae6a7e..49bfdc8a159869fa27e61904909de8c0dac84c47 100644 --- a/platforms/kplat-macros/src/lib.rs +++ b/platforms/kplat-macros/src/lib.rs @@ -7,65 +7,11 @@ use proc_macro::TokenStream; use proc_macro2::Span; use quote::quote; -use syn::{Error, FnArg, ItemFn, ItemTrait, ReturnType, TraitItem}; +use syn::{Error, FnArg, ItemTrait, TraitItem}; fn err_ts(e: Error) -> TokenStream { e.to_compile_error().into() } -fn check_fn(t: TokenStream, cnt: usize, exp_name: &str, msg: &str) -> TokenStream { - let f = syn::parse_macro_input!(t as ItemFn); - let mut bad = if let ReturnType::Type(_, ty) = &f.sig.output { - quote! { #ty }.to_string() != "!" - } else { - true - }; - let inputs = &f.sig.inputs; - // for i in inputs.iter() { - // if let FnArg::Typed(pt) = i { - // if quote! { #pt.ty }.to_string() != "usize" { - // bad = true; - // break; - // } - // } - // } - if inputs.len() != cnt { - bad = true; - } - if bad { - err_ts(Error::new(Span::call_site(), msg)) - } else { - quote! { - #[unsafe(export_name = #exp_name)] - #f - } - .into() - } -} -/// Marks the primary platform entry function. -#[proc_macro_attribute] -pub fn main(attr: TokenStream, item: TokenStream) -> TokenStream { - if !attr.is_empty() { - return err_ts(Error::new(Span::call_site(), "Attr must be empty")); - } - check_fn( - item, - 2, - "__kplat_main", - "Sign: fn(cpu: usize, arg: usize) -> !", - ) -} -/// Marks the secondary CPU entry function (SMP only). 
-#[proc_macro_attribute] -pub fn secondary_main(attr: TokenStream, item: TokenStream) -> TokenStream { - if !attr.is_empty() { - return err_ts(Error::new(Span::call_site(), "Attr must be empty")); - } - check_fn( - item, - 1, - "__kplat_secondary_main", - "Sign: fn(cpu: usize) -> !", - ) -} + /// Generates dispatch wrappers for a platform device interface trait. #[proc_macro_attribute] pub fn device_interface(attr: TokenStream, item: TokenStream) -> TokenStream { diff --git a/platforms/kplat/src/lib.rs b/platforms/kplat/src/lib.rs index 5b7c0ce9a5756aaa14b49031cd451b29959a66b9..c7ee2a6e3bd8b792b01fb813cc4ee08c85ce2ecd 100644 --- a/platforms/kplat/src/lib.rs +++ b/platforms/kplat/src/lib.rs @@ -23,9 +23,6 @@ pub mod sys; pub mod timer; pub use crate_interface::impl_interface as impl_dev_interface; -pub use kplat_macros::main; -#[cfg(feature = "smp")] -pub use kplat_macros::secondary_main; #[doc(hidden)] pub mod __priv { @@ -42,19 +39,3 @@ macro_rules! check_str_eq { const _: () = assert!($crate::__priv::str_eq!($l, $r), "String mismatch",); }; } - -/// Primary CPU entry point invoked by the platform startup. -pub fn entry(id: usize, dtb: usize) -> ! { - unsafe { __kplat_main(id, dtb) } -} - -/// Secondary CPU entry point (SMP only). -#[cfg(feature = "smp")] -pub fn entry_secondary(id: usize) -> ! 
{ - unsafe { __kplat_secondary_main(id) } -} - -unsafe extern "Rust" { - fn __kplat_main(id: usize, dtb: usize) -> !; - fn __kplat_secondary_main(id: usize) -> !; -} diff --git a/platforms/x86-csv/Cargo.toml b/platforms/x86-csv/Cargo.toml index 27b5464835da4c28482321d30fa1de3082ec237d..2ceddd3d991aaf906033aefe727f0c69d0539048 100644 --- a/platforms/x86-csv/Cargo.toml +++ b/platforms/x86-csv/Cargo.toml @@ -9,12 +9,13 @@ homepage.workspace = true repository.workspace = true [features] -fp-simd = ["kcpu/fp-simd"] +fp-simd = ["kcpu/fp-simd", "kbootloader/fp-simd"] rtc = ["x86-peripherals/rtc"] smp = ["kplat/smp", "kspin/smp"] reboot-on-system-off = [] [dependencies] +kbootloader = { workspace = true } kspin = { workspace = true } log = { workspace = true } bitflags = "2.6" @@ -28,10 +29,8 @@ kbuild_config = { workspace = true } x86-peripherals = { workspace = true } [target.'cfg(target_arch = "x86_64")'.dependencies] -x86 = "0.52" x86_64 = { workspace = true } multiboot = "0.8" -raw-cpuid = "11.5" [package.metadata.docs.rs] targets = ["x86_64-unknown-none"] diff --git a/platforms/x86-csv/src/boot.rs b/platforms/x86-csv/src/boot.rs deleted file mode 100644 index 0c751b4764d310f2693924644de9fabf30efae06..0000000000000000000000000000000000000000 --- a/platforms/x86-csv/src/boot.rs +++ /dev/null @@ -1,50 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright 2025 KylinSoft Co., Ltd. -// See LICENSES for license details. 
- -use core::arch::global_asm; - -use kbuild_config::{BOOT_STACK_SIZE, PHYS_VIRT_OFFSET, SEV_CBIT_POS}; -use x86_64::registers::{ - control::{Cr0Flags, Cr4Flags}, - model_specific::EferFlags, -}; -const MULTIBOOT_HEADER_FLAGS: usize = 0x0001_0002; -const MULTIBOOT_HEADER_MAGIC: usize = 0x1BADB002; -pub(super) const MULTIBOOT_BOOTLOADER_MAGIC: usize = 0x2BADB002; -const CR0: u64 = Cr0Flags::PROTECTED_MODE_ENABLE.bits() - | Cr0Flags::MONITOR_COPROCESSOR.bits() - | Cr0Flags::NUMERIC_ERROR.bits() - | Cr0Flags::WRITE_PROTECT.bits() - | Cr0Flags::PAGING.bits(); -const CR4: u64 = Cr4Flags::PHYSICAL_ADDRESS_EXTENSION.bits() - | Cr4Flags::PAGE_GLOBAL.bits() - | if cfg!(feature = "fp-simd") { - Cr4Flags::OSFXSR.bits() | Cr4Flags::OSXMMEXCPT_ENABLE.bits() - } else { - 0 - }; -const EFER: u64 = EferFlags::LONG_MODE_ENABLE.bits() | EferFlags::NO_EXECUTE_ENABLE.bits(); -const SEV_CBIT_MASK: u64 = if SEV_CBIT_POS == 0 { - 0 -} else { - 1u64 << SEV_CBIT_POS -}; -#[unsafe(link_section = ".bss.stack")] -static mut BOOT_STACK: [u8; BOOT_STACK_SIZE] = [0; BOOT_STACK_SIZE]; -global_asm!( - include_str!("multiboot.S"), - mb_magic = const MULTIBOOT_BOOTLOADER_MAGIC, - mb_hdr_magic = const MULTIBOOT_HEADER_MAGIC, - mb_hdr_flags = const MULTIBOOT_HEADER_FLAGS, - entry = sym crate::rust_entry, - entry_secondary = sym crate::rust_entry_secondary, - offset = const PHYS_VIRT_OFFSET, - boot_stack_size = const BOOT_STACK_SIZE, - boot_stack = sym BOOT_STACK, - cr0 = const CR0, - cr4 = const CR4, - efer_msr = const x86::msr::IA32_EFER, - efer = const EFER, - cbit_mask = const SEV_CBIT_MASK, -); diff --git a/platforms/x86-csv/src/init.rs b/platforms/x86-csv/src/init.rs index d9d0c2c182c56443ffb1be04a2553a39762262da..a3718a61b340709fa2ca95a398fcd7d57e495c04 100644 --- a/platforms/x86-csv/src/init.rs +++ b/platforms/x86-csv/src/init.rs @@ -7,16 +7,13 @@ struct BootHandlerImpl; #[impl_dev_interface] impl BootHandler for BootHandlerImpl { fn early_init(_cpu_id: usize, mbi: usize) { - 
kcpu::boot::init_trap(); x86_peripherals::ns16550::init(); x86_peripherals::tsc_timer::early_init(); crate::mem::init(mbi); } #[cfg(feature = "smp")] - fn early_init_ap(_cpu_id: usize) { - kcpu::boot::init_trap(); - } + fn early_init_ap(_cpu_id: usize) {} fn final_init(_cpu_id: usize, _arg: usize) { crate::psci::init(); diff --git a/platforms/x86-csv/src/lib.rs b/platforms/x86-csv/src/lib.rs index d3006e6baaae13db32ba73d8b186f37c559cce16..a301903ad7f506018dec4d40e3b90303a2745f6a 100644 --- a/platforms/x86-csv/src/lib.rs +++ b/platforms/x86-csv/src/lib.rs @@ -8,7 +8,7 @@ extern crate log; #[macro_use] extern crate kplat; -mod boot; +extern crate kbootloader; mod init; mod mem; #[cfg(feature = "smp")] @@ -19,21 +19,3 @@ pub mod psci; x86_peripherals::console_if_impl!(ConsoleImpl, irq = None); x86_peripherals::time_if_impl!(GlobalTimerImpl); x86_peripherals::irq_if_impl!(IntrManagerImpl); - -fn current_cpu_id() -> usize { - match raw_cpuid::CpuId::new().get_feature_info() { - Some(finfo) => finfo.initial_local_apic_id() as usize, - None => 0, - } -} -unsafe extern "C" fn rust_entry(magic: usize, mbi: usize) { - if magic == self::boot::MULTIBOOT_BOOTLOADER_MAGIC { - kplat::entry(current_cpu_id(), mbi); - } -} -unsafe extern "C" fn rust_entry_secondary(_magic: usize) { - #[cfg(feature = "smp")] - if _magic == self::boot::MULTIBOOT_BOOTLOADER_MAGIC { - kplat::entry_secondary(current_cpu_id()); - } -} diff --git a/platforms/x86-csv/src/mp.rs b/platforms/x86-csv/src/mp.rs index df8cade8d75415adb963a7e61a32879596b9d903..512110c6098ed25d4189500f3d4185803db37e93 100644 --- a/platforms/x86-csv/src/mp.rs +++ b/platforms/x86-csv/src/mp.rs @@ -4,17 +4,14 @@ use core::time::Duration; +use kbootloader::arch::{AP_START_PAGE_IDX, AP_START_PAGE_PADDR}; use kplat::{ memory::{PAGE_SIZE_4K, PhysAddr, pa}, timer::spin_wait, }; -const START_PAGE_IDX: u8 = 6; -const START_PAGE_PADDR: PhysAddr = pa!(START_PAGE_IDX as usize * PAGE_SIZE_4K); -core::arch::global_asm!( - 
include_str!("ap_start.S"), - start_page_paddr = const START_PAGE_PADDR.as_usize(), -); +const START_PAGE_PADDR: PhysAddr = pa!(AP_START_PAGE_PADDR); + unsafe fn setup_startup_page(stack_top: PhysAddr) { unsafe extern "C" { fn ap_entry32(); @@ -40,7 +37,7 @@ pub fn start_secondary_cpu(apic_id: usize, stack_top: PhysAddr) { let lapic = x86_peripherals::apic::local_apic(); unsafe { lapic.send_init_ipi(apic_id) }; spin_wait(Duration::from_millis(10)); - unsafe { lapic.send_sipi(START_PAGE_IDX, apic_id) }; + unsafe { lapic.send_sipi(AP_START_PAGE_IDX, apic_id) }; spin_wait(Duration::from_micros(200)); - unsafe { lapic.send_sipi(START_PAGE_IDX, apic_id) }; + unsafe { lapic.send_sipi(AP_START_PAGE_IDX, apic_id) }; } diff --git a/platforms/x86_64-qemu-virt/Cargo.toml b/platforms/x86_64-qemu-virt/Cargo.toml index 14df8003400efbfbd12313e47934c5734ccbcb29..827424b1b6954b93f5942299d175adf752991ba8 100644 --- a/platforms/x86_64-qemu-virt/Cargo.toml +++ b/platforms/x86_64-qemu-virt/Cargo.toml @@ -9,12 +9,13 @@ homepage.workspace = true repository.workspace = true [features] -fp-simd = ["kcpu/fp-simd"] +fp-simd = ["kcpu/fp-simd", "kbootloader/fp-simd"] rtc = ["x86-peripherals/rtc"] smp = ["kplat/smp", "kspin/smp"] reboot-on-system-off = [] [dependencies] +kbootloader = { workspace = true } kspin = { workspace = true } log = "0.4" bitflags = "2.6" @@ -28,10 +29,8 @@ kbuild_config = { workspace = true } x86-peripherals = { workspace = true } [target.'cfg(target_arch = "x86_64")'.dependencies] -x86 = "0.52" x86_64 = { workspace = true } multiboot = "0.8" -raw-cpuid = "11.5" [package.metadata.docs.rs] targets = ["x86_64-unknown-none"] diff --git a/platforms/x86_64-qemu-virt/defconfig b/platforms/x86_64-qemu-virt/defconfig index 9ca24b2f533cd462ecc68d4e12e872c44b80000e..26dcb6dab9739ba2155cbd8fc7c65a3cf214d48b 100644 --- a/platforms/x86_64-qemu-virt/defconfig +++ b/platforms/x86_64-qemu-virt/defconfig @@ -39,7 +39,7 @@ PLATFORM="x86_64-qemu-virt" # PLATFORM_RISCV64_QEMU_VIRT is not 
set PLATFORM_X86_64_QEMU_VIRT=y # PLATFORM_X86_CSV is not set -SEV_CBIT_POS=47 +SEV_CBIT_POS=0 TASK_STACK_SIZE=0x40000 TICKS_PER_SECOND=100 TIMER_FREQUENCY_HZ=4000000000 diff --git a/platforms/x86_64-qemu-virt/src/ap_start.S b/platforms/x86_64-qemu-virt/src/ap_start.S deleted file mode 100644 index 2af322b01f4e587807b5919f174e29b567387252..0000000000000000000000000000000000000000 --- a/platforms/x86_64-qemu-virt/src/ap_start.S +++ /dev/null @@ -1,70 +0,0 @@ -# Boot application processors into the protected mode. - -# Each non-boot CPU ("AP") is started up in response to a STARTUP -# IPI from the boot CPU. Section B.4.2 of the Multi-Processor -# Specification says that the AP will start in real mode with CS:IP -# set to XY00:0000, where XY is an 8-bit value sent with the -# STARTUP. Thus this code must start at a 4096-byte boundary. -# -# Because this code sets DS to zero, it must sit -# at an address in the low 2^16 bytes. - -.equ pa_ap_start32, ap_start32 - ap_start + {start_page_paddr} -.equ pa_ap_gdt, .Lap_tmp_gdt - ap_start + {start_page_paddr} -.equ pa_ap_gdt_desc, .Lap_tmp_gdt_desc - ap_start + {start_page_paddr} - -.equ stack_ptr, {start_page_paddr} + 0xff0 -.equ entry_ptr, {start_page_paddr} + 0xff8 - -# 0x6000 -.section .text -.code16 -.p2align 12 -.global ap_start -ap_start: - cli - wbinvd - - xor ax, ax - mov ds, ax - mov es, ax - mov ss, ax - mov fs, ax - mov gs, ax - - # load the 64-bit GDT - lgdt [pa_ap_gdt_desc] - - # switch to protected-mode - mov eax, cr0 - or eax, (1 << 0) - mov cr0, eax - - # far jump to 32-bit code. 
0x8 is code32 segment selector - ljmp 0x8, offset pa_ap_start32 - -.code32 -ap_start32: - mov esp, [stack_ptr] - mov eax, [entry_ptr] - jmp eax - -.balign 8 -# .type multiboot_header, STT_OBJECT -.Lap_tmp_gdt_desc: - .short .Lap_tmp_gdt_end - .Lap_tmp_gdt - 1 # limit - .long pa_ap_gdt # base - -.balign 16 -.Lap_tmp_gdt: - .quad 0x0000000000000000 # 0x00: null - .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) - .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) - .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) -.Lap_tmp_gdt_end: - -.code64 -# 0x7000 -.p2align 12 -.global ap_end -ap_end: diff --git a/platforms/x86_64-qemu-virt/src/boot.rs b/platforms/x86_64-qemu-virt/src/boot.rs deleted file mode 100644 index db08f3cd4f6ac76e5fe96d555168c391ab121d3d..0000000000000000000000000000000000000000 --- a/platforms/x86_64-qemu-virt/src/boot.rs +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright 2025 KylinSoft Co., Ltd. -// See LICENSES for license details. - -//! Multiboot entry setup for x86_64-qemu-virt. 
- -use core::arch::global_asm; - -use kbuild_config::{BOOT_STACK_SIZE, PHYS_VIRT_OFFSET}; -use x86_64::registers::{ - control::{Cr0Flags, Cr4Flags}, - model_specific::EferFlags, -}; -const MULTIBOOT_HEADER_FLAGS: usize = 0x0001_0002; -const MULTIBOOT_HEADER_MAGIC: usize = 0x1BADB002; -pub(super) const MULTIBOOT_BOOTLOADER_MAGIC: usize = 0x2BADB002; -const CR0: u64 = Cr0Flags::PROTECTED_MODE_ENABLE.bits() - | Cr0Flags::MONITOR_COPROCESSOR.bits() - | Cr0Flags::NUMERIC_ERROR.bits() - | Cr0Flags::WRITE_PROTECT.bits() - | Cr0Flags::PAGING.bits(); -const CR4: u64 = Cr4Flags::PHYSICAL_ADDRESS_EXTENSION.bits() - | Cr4Flags::PAGE_GLOBAL.bits() - | if cfg!(feature = "fp-simd") { - Cr4Flags::OSFXSR.bits() | Cr4Flags::OSXMMEXCPT_ENABLE.bits() - } else { - 0 - }; -const EFER: u64 = EferFlags::LONG_MODE_ENABLE.bits() | EferFlags::NO_EXECUTE_ENABLE.bits(); -#[unsafe(link_section = ".bss.stack")] -static mut BOOT_STACK: [u8; BOOT_STACK_SIZE] = [0; BOOT_STACK_SIZE]; -global_asm!( - include_str!("multiboot.S"), - mb_magic = const MULTIBOOT_BOOTLOADER_MAGIC, - mb_hdr_magic = const MULTIBOOT_HEADER_MAGIC, - mb_hdr_flags = const MULTIBOOT_HEADER_FLAGS, - entry = sym crate::rust_entry, - entry_secondary = sym crate::rust_entry_secondary, - offset = const PHYS_VIRT_OFFSET, - boot_stack_size = const BOOT_STACK_SIZE, - boot_stack = sym BOOT_STACK, - cr0 = const CR0, - cr4 = const CR4, - efer_msr = const x86::msr::IA32_EFER, - efer = const EFER, -); diff --git a/platforms/x86_64-qemu-virt/src/init.rs b/platforms/x86_64-qemu-virt/src/init.rs index a2f8241c9222289c14c235f6b603b7837981da11..413499fa134fd7540d53f6bd4b75ebf3551e7148 100644 --- a/platforms/x86_64-qemu-virt/src/init.rs +++ b/platforms/x86_64-qemu-virt/src/init.rs @@ -9,16 +9,13 @@ struct BootHandlerImpl; #[impl_dev_interface] impl BootHandler for BootHandlerImpl { fn early_init(_cpu_id: usize, mbi: usize) { - kcpu::boot::init_trap(); x86_peripherals::ns16550::init(); x86_peripherals::tsc_timer::early_init(); crate::mem::init(mbi); 
} #[cfg(feature = "smp")] - fn early_init_ap(_cpu_id: usize) { - kcpu::boot::init_trap(); - } + fn early_init_ap(_cpu_id: usize) {} fn final_init(_cpu_id: usize, _arg: usize) { x86_peripherals::apic::init_primary(kplat::memory::pa!(0xFEC0_0000)); diff --git a/platforms/x86_64-qemu-virt/src/lib.rs b/platforms/x86_64-qemu-virt/src/lib.rs index 47242c3a344384c63c80884032854d10d772af72..6528b879d8fdccbf4d9679ff3167e330d57d2891 100644 --- a/platforms/x86_64-qemu-virt/src/lib.rs +++ b/platforms/x86_64-qemu-virt/src/lib.rs @@ -10,7 +10,7 @@ extern crate log; #[macro_use] extern crate kplat; -mod boot; +extern crate kbootloader; mod init; mod mem; #[cfg(feature = "smp")] @@ -20,21 +20,3 @@ mod power; x86_peripherals::console_if_impl!(ConsoleImpl, irq = Some(4)); x86_peripherals::time_if_impl!(GlobalTimerImpl); x86_peripherals::irq_if_impl!(IntrManagerImpl); - -fn current_cpu_id() -> usize { - match raw_cpuid::CpuId::new().get_feature_info() { - Some(finfo) => finfo.initial_local_apic_id() as usize, - None => 0, - } -} -unsafe extern "C" fn rust_entry(magic: usize, mbi: usize) { - if magic == self::boot::MULTIBOOT_BOOTLOADER_MAGIC { - kplat::entry(current_cpu_id(), mbi); - } -} -unsafe extern "C" fn rust_entry_secondary(_magic: usize) { - #[cfg(feature = "smp")] - if _magic == self::boot::MULTIBOOT_BOOTLOADER_MAGIC { - kplat::entry_secondary(current_cpu_id()); - } -} diff --git a/platforms/x86_64-qemu-virt/src/mp.rs b/platforms/x86_64-qemu-virt/src/mp.rs index 2b66fd1a2e04c81748123505e6b6a6ada361ed56..5cecf0aa98e2d4a57619377020bd96715414e103 100644 --- a/platforms/x86_64-qemu-virt/src/mp.rs +++ b/platforms/x86_64-qemu-virt/src/mp.rs @@ -6,17 +6,14 @@ use core::time::Duration; +use kbootloader::arch::{AP_START_PAGE_IDX, AP_START_PAGE_PADDR}; use kplat::{ memory::{PAGE_SIZE_4K, PhysAddr, pa}, timer::spin_wait, }; -const START_PAGE_IDX: u8 = 6; -const START_PAGE_PADDR: PhysAddr = pa!(START_PAGE_IDX as usize * PAGE_SIZE_4K); -core::arch::global_asm!( - 
include_str!("ap_start.S"), - start_page_paddr = const START_PAGE_PADDR.as_usize(), -); +const START_PAGE_PADDR: PhysAddr = pa!(AP_START_PAGE_PADDR); + unsafe fn setup_startup_page(stack_top: PhysAddr) { unsafe extern "C" { fn ap_entry32(); @@ -43,7 +40,7 @@ pub fn start_secondary_cpu(apic_id: usize, stack_top: PhysAddr) { let lapic = x86_peripherals::apic::local_apic(); unsafe { lapic.send_init_ipi(apic_id) }; spin_wait(Duration::from_millis(10)); - unsafe { lapic.send_sipi(START_PAGE_IDX, apic_id) }; + unsafe { lapic.send_sipi(AP_START_PAGE_IDX, apic_id) }; spin_wait(Duration::from_micros(200)); - unsafe { lapic.send_sipi(START_PAGE_IDX, apic_id) }; + unsafe { lapic.send_sipi(AP_START_PAGE_IDX, apic_id) }; } diff --git a/platforms/x86_64-qemu-virt/src/multiboot.S b/platforms/x86_64-qemu-virt/src/multiboot.S deleted file mode 100644 index c0af72a381f9e5aaf328f371d9bae81d5b0c8c92..0000000000000000000000000000000000000000 --- a/platforms/x86_64-qemu-virt/src/multiboot.S +++ /dev/null @@ -1,144 +0,0 @@ -# Bootstrapping from 32-bit with the Multiboot specification. -# See https://www.gnu.org/software/grub/manual/multiboot/multiboot.html - -.section .text.boot -.code32 -.global _start -_start: - mov edi, eax # arg1: magic: 0x2BADB002 - mov esi, ebx # arg2: multiboot info - jmp bsp_entry32 - -.balign 4 -.type multiboot_header, STT_OBJECT -multiboot_header: - .int {mb_hdr_magic} # magic: 0x1BADB002 - .int {mb_hdr_flags} # flags - .int -({mb_hdr_magic} + {mb_hdr_flags}) # checksum - .int multiboot_header - {offset} # header_addr - .int _skernel - {offset} # load_addr - .int _edata - {offset} # load_end - .int _ebss - {offset} # bss_end_addr - .int _start - {offset} # entry_addr - -# Common code in 32-bit, prepare states to enter 64-bit. 
-.macro ENTRY32_COMMON - # set data segment selectors - mov ax, 0x18 - mov ss, ax - mov ds, ax - mov es, ax - mov fs, ax - mov gs, ax - - # set PAE, PGE bit in CR4 - mov eax, {cr4} - mov cr4, eax - - # load the temporary page table - lea eax, [.Ltmp_pml4 - {offset}] - mov cr3, eax - - # set LME, NXE bit in IA32_EFER - mov ecx, {efer_msr} - mov edx, 0 - mov eax, {efer} - wrmsr - - # set protected mode, write protect, paging bit in CR0 - mov eax, {cr0} - mov cr0, eax -.endm - -# Common code in 64-bit -.macro ENTRY64_COMMON - # clear segment selectors - xor ax, ax - mov ss, ax - mov ds, ax - mov es, ax - mov fs, ax - mov gs, ax -.endm - -.code32 -bsp_entry32: - lgdt [.Ltmp_gdt_desc - {offset}] # load the temporary GDT - ENTRY32_COMMON - ljmp 0x10, offset bsp_entry64 - {offset} # 0x10 is code64 segment - -.code32 -.global ap_entry32 -ap_entry32: - ENTRY32_COMMON - ljmp 0x10, offset ap_entry64 - {offset} # 0x10 is code64 segment - -.code64 -bsp_entry64: - ENTRY64_COMMON - - # set RSP to boot stack - movabs rsp, offset {boot_stack} - add rsp, {boot_stack_size} - - # call rust_entry(magic, mbi) - movabs rax, offset {entry} - call rax - jmp .Lhlt - -.code64 -ap_entry64: - ENTRY64_COMMON - - # set RSP to high address (already set in ap_start.S) - mov rax, {offset} - add rsp, rax - - # call rust_entry_secondary(magic) - mov rdi, {mb_magic} - movabs rax, offset {entry_secondary} - call rax - jmp .Lhlt - -.Lhlt: - hlt - jmp .Lhlt - -.section .rodata -.balign 8 -.Ltmp_gdt_desc: - .short .Ltmp_gdt_end - .Ltmp_gdt - 1 # limit - .long .Ltmp_gdt - {offset} # base - -.section .data -.balign 16 -.Ltmp_gdt: - .quad 0x0000000000000000 # 0x00: null - .quad 0x00cf9b000000ffff # 0x08: code segment (base=0, limit=0xfffff, type=32bit code exec/read, DPL=0, 4k) - .quad 0x00af9b000000ffff # 0x10: code segment (base=0, limit=0xfffff, type=64bit code exec/read, DPL=0, 4k) - .quad 0x00cf93000000ffff # 0x18: data segment (base=0, limit=0xfffff, type=32bit data read/write, DPL=0, 4k) 
-.Ltmp_gdt_end: - -.balign 4096 -.Ltmp_pml4: - # 0x0000_0000 ~ 0x7f_ffff_ffff (512 GiB) - .quad .Ltmp_pdpt_low - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) - .zero 8 * 255 - # 0xffff_8000_0000_0000 ~ 0xffff_807f_ffff_ffff (512 GiB) - .quad .Ltmp_pdpt_high - {offset} + 0x3 # PRESENT | WRITABLE | paddr(tmp_pdpt) - .zero 8 * 255 - -# FIXME: may not work on macOS using hvf as the CPU does not support 1GB page (pdpe1gb) -.Ltmp_pdpt_low: -.set i, 0 -.rept 512 - .quad 0x40000000 * i | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000 * i) - .set i, i + 1 -.endr - -.Ltmp_pdpt_high: -.set i, 0 -.rept 512 - .quad 0x40000000 * i | 0x83 # PRESENT | WRITABLE | HUGE_PAGE | paddr(0x4000_0000 * i) - .set i, i + 1 -.endr diff --git a/scripts/make/dwarf.sh b/scripts/make/dwarf.sh index 82d5eea613dc4e34be03b91b9dc40b82cd2554ab..832488dc26940a1a57aff7e944ed7c82f1f89765 100755 --- a/scripts/make/dwarf.sh +++ b/scripts/make/dwarf.sh @@ -26,19 +26,30 @@ SECTIONS=( debug_str_offsets ) +# Step 1: Dump all .debug_* sections to temporary files (in parallel) for section in "${SECTIONS[@]}"; do - $OBJCOPY $ELF --dump-section .$section=$section.bin 2> /dev/null || touch $section.bin & + $OBJCOPY "$ELF" --dump-section ".$section=$section.bin" 2> /dev/null || touch "$section.bin" & done wait -$OBJCOPY $ELF --strip-debug -cmd=($OBJCOPY $ELF) +# Step 2: Strip debug info from the ELF. +# This removes all .debug_* sections and the SHT_SYMTAB_SHNDX table +# that references them, producing a clean ELF. +$OBJCOPY "$ELF" --strip-debug + +# Step 3: Re-insert the debug data as non-dot-prefixed sections (e.g. "debug_info") +# using a single --update-section pass, without the old --rename-section step. +# This avoids the llvm-objcopy bug where SHN_XINDEX entries become +# inconsistent after combined update+rename operations. 
+cmd=($OBJCOPY "$ELF") for section in "${SECTIONS[@]}"; do - cmd+=(--update-section $section=$section.bin) - cmd+=(--rename-section $section=.$section) + if [ -s "$section.bin" ]; then + cmd+=(--update-section "$section=$section.bin") + fi done -${cmd[@]} +"${cmd[@]}" +# Step 4: Clean up temporary files for section in "${SECTIONS[@]}"; do - rm -f $section.bin + rm -f "$section.bin" done