diff --git a/.cargo/config.toml b/.cargo/config.toml index b73ed2c..eb378db 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,16 +1,13 @@ +include = [ + "../config.toml" +] [alias] xtask = "--config xtasks/.cargo/config.toml run -p xtask --release --" -[env] - -[build] -target = "host-tuple" - -[target] - [target.'cfg(target_os = "none")'] -rustflags = ["-C", "link-arg=--entry=main",] - -[target.thumbv7em-none-eabi] -rustflags = ["-C", "relocation-model=ropi-rwpi"] +rustflags = [ + "-C", "link-arg=--entry=main", + "-C", "link-arg=-Tprelude.ld", + "-C", "link-arg=-Tlink.ld", +] \ No newline at end of file diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 0857d86..d0a9646 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -86,7 +86,7 @@ ENV CARGO_HOME=/usr/local/cargo ENV KANI_HOME=/usr/local/kani ENV PATH="/usr/local/cargo/bin:${PATH}" -ARG RUST_VERSION=1.93.1 +ARG RUST_VERSION=1.94.1 RUN --mount=type=cache,target=/usr/local/cargo/registry \ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs \ | sh -s -- -y --no-modify-path --default-toolchain ${RUST_VERSION} && \ diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 89b2ba7..175a961 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -36,7 +36,9 @@ "runArgs": [ // Mount USB devices under Linux "--device", - "/dev/bus/usb:/dev/bus/usb" + "/dev/bus/usb:/dev/bus/usb", + "--group-add", + "keep-groups" ], "mounts": [ // Make ssh keys available diff --git a/.gitignore b/.gitignore index aeeb6ba..5902a98 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ symbols.map compile_commands.json .cache/ *.img +config.toml diff --git a/Cargo.lock b/Cargo.lock index 933791c..e0af60e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,7 +95,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" 
dependencies = [ - "bitflags", + "bitflags 2.10.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -118,7 +118,7 @@ version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags", + "bitflags 2.10.0", "cexpr", "clang-sys", "itertools 0.13.0", @@ -132,6 +132,12 @@ dependencies = [ "syn", ] +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.10.0" @@ -412,13 +418,19 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + [[package]] name = "crossterm" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ - "bitflags", + "bitflags 2.10.0", "crossterm_winapi", "libc", "mio 0.8.11", @@ -434,7 +446,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags", + "bitflags 2.10.0", "crossterm_winapi", "mio 1.1.1", "parking_lot", @@ -514,6 +526,48 @@ dependencies = [ "syn", ] +[[package]] +name = "defmt" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "548d977b6da32fa1d1fda2876453da1e7df63ad0304c8b3dae4dbe7b96f39b78" +dependencies = [ + "bitflags 1.3.2", + "defmt-macros", +] + +[[package]] +name = "defmt-macros" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d4fc12a85bcf441cfe44344c4b72d58493178ce635338a3f3b78943aceb258e" 
+dependencies = [ + "defmt-parser", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "defmt-parser" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10d60334b3b2e7c9d91ef8150abfb6fa4c1c39ebbcf4a81c2e346aad939fee3e" +dependencies = [ + "thiserror 2.0.17", +] + +[[package]] +name = "defmt-rtt" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d5a25c99d89c40f5676bec8cefe0614f17f0f40e916f98e345dae941807f9e" +dependencies = [ + "critical-section", + "defmt", +] + [[package]] name = "digest" version = "0.10.7" @@ -529,6 +583,9 @@ version = "0.1.0" dependencies = [ "clap", "fdt", + "indoc", + "log", + "logging", "prettyplease", "proc-macro2", "quote", @@ -713,6 +770,7 @@ dependencies = [ "bindgen 0.72.1", "cbindgen", "cmake", + "critical-section", "hal-api", "serde_json", ] @@ -831,15 +889,6 @@ dependencies = [ "syn", ] -[[package]] -name = "interface" -version = "0.1.0" -dependencies = [ - "bytemuck", - "cbindgen", - "cfg_aliases", -] - [[package]] name = "is_terminal_polyfill" version = "1.70.2" @@ -894,6 +943,36 @@ dependencies = [ "syn", ] +[[package]] +name = "kani" +version = "0.67.0" +source = "git+https://github.com/model-checking/kani#ac1f0a1c03fcbb805002154689ba15184e2f36b7" +dependencies = [ + "kani_core", + "kani_macros", +] + +[[package]] +name = "kani_core" +version = "0.67.0" +source = "git+https://github.com/model-checking/kani#ac1f0a1c03fcbb805002154689ba15184e2f36b7" +dependencies = [ + "kani_macros", +] + +[[package]] +name = "kani_macros" +version = "0.67.0" +source = "git+https://github.com/model-checking/kani#ac1f0a1c03fcbb805002154689ba15184e2f36b7" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "strum 0.27.2", + "strum_macros 0.27.2", + "syn", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -1060,13 +1139,16 @@ name = "osiris" version = "0.1.0" dependencies = [ "bindgen 0.69.5", + 
"bitflags 2.10.0", "cbindgen", "cfg_aliases", + "defmt", + "defmt-rtt", "dtgen", "envparse", "hal-select", "hal-testing", - "interface", + "kani", "macros", "quote", "rand", @@ -1084,7 +1166,6 @@ dependencies = [ "clap", "crc-fast", "elf", - "interface", "log", "logging", "tempfile", @@ -1154,6 +1235,28 @@ dependencies = [ "syn", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "proc-macro2" version = "1.0.105" @@ -1214,7 +1317,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eabd94c2f37801c20583fc49dd5cd6b0ba68c716787c2dd6ed18571e1e63117b" dependencies = [ - "bitflags", + "bitflags 2.10.0", "cassowary", "compact_str", "crossterm 0.28.1", @@ -1223,7 +1326,7 @@ dependencies = [ "itertools 0.13.0", "lru", "paste", - "strum", + "strum 0.26.3", "unicode-segmentation", "unicode-truncate", "unicode-width 0.2.0", @@ -1235,7 +1338,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags", + "bitflags 2.10.0", ] [[package]] @@ -1293,7 +1396,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags", + "bitflags 2.10.0", "errno", "libc", "linux-raw-sys 0.4.15", @@ -1306,7 +1409,7 @@ version = "1.1.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ - "bitflags", + "bitflags 2.10.0", "errno", "libc", "linux-raw-sys 0.11.0", @@ -1500,9 +1603,15 @@ version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" dependencies = [ - "strum_macros", + "strum_macros 0.26.4", ] +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" + [[package]] name = "strum_macros" version = "0.26.4" @@ -1516,6 +1625,18 @@ dependencies = [ "syn", ] +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "syn" version = "2.0.114" diff --git a/Cargo.toml b/Cargo.toml index cb3fff1..e451399 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,10 @@ [workspace] members = ["examples/*", "xtasks", "xtasks/crates/*"] +default-members = ["."] [workspace.dependencies] -interface = { path = "interface" } logging = { path = "xtasks/logging" } +osiris = { path = "." } [package] name = "osiris" @@ -19,20 +20,26 @@ path = "src/main.rs" [dependencies] hal = { package = "hal-select", path = "machine/select" } -macros = { path = "macros" } -interface = { path = "interface" } +proc_macros = { package = "macros", path = "macros" } envparse = "0.1.0" +bitflags = "2.10.0" +defmt = { version = "1.0", optional = true } +defmt-rtt = { version = "1.0", optional = true } [dev-dependencies] # This is a host-compatible HAL which will be used for running tests and verification on the host. 
hal-testing = { path = "machine/testing", features = [] } +rand = "0.8.5" +[target.'cfg(kani_ra)'.dependencies] +kani = { git = "https://github.com/model-checking/kani" } [features] default = [] nightly = [] no-atomic-cas = [] multi-core = [] +defmt = ["dep:defmt", "dep:defmt-rtt"] [build-dependencies] cbindgen = "0.28.0" diff --git a/boards/nucleo_l4r5zi.dts b/boards/nucleo_l4r5zi.dts index d99a7d1..ce19d57 100644 --- a/boards/nucleo_l4r5zi.dts +++ b/boards/nucleo_l4r5zi.dts @@ -12,6 +12,8 @@ #include #include +/delete-node/ &sram1; + / { model = "STMicroelectronics STM32L4R5ZI-NUCLEO board"; compatible = "st,stm32l4r5zi-nucleo"; @@ -27,6 +29,11 @@ osiris,entropy = &rng; }; + sram1: memory@20030000 { + device_type = "memory"; + reg = <0x20030000 0x00010000>; + }; + leds: leds { compatible = "gpio-leds"; diff --git a/build.rs b/build.rs index eb314c7..27c4cfa 100644 --- a/build.rs +++ b/build.rs @@ -1,3 +1,4 @@ +use core::panic; use std::process::Command; use std::{collections::HashMap, fs, fs::File, path::Path, path::PathBuf}; @@ -6,9 +7,9 @@ extern crate syn; extern crate walkdir; use cfg_aliases::cfg_aliases; -use quote::ToTokens; +use quote::format_ident; use std::io::Write; -use syn::{Attribute, FnArg, LitInt, punctuated::Punctuated, token::Comma}; +use syn::{Attribute, LitInt}; use walkdir::WalkDir; extern crate cbindgen; @@ -16,17 +17,18 @@ extern crate cbindgen; fn main() { println!("cargo::rerun-if-changed=src"); println!("cargo::rerun-if-changed=build.rs"); + let out_dir = std::env::var("OUT_DIR").unwrap(); - generate_syscall_map("src/syscalls").expect("Failed to generate syscall map."); - generate_syscalls_export("src/syscalls").expect("Failed to generate syscall exports."); + if gen_syscall_match(Path::new("src/syscalls"), Path::new(&out_dir)).is_err() { + panic!("Failed to generate syscall match statement."); + } - generate_device_tree().expect("Failed to generate device tree."); + let dt = build_device_tree(Path::new(&out_dir)).unwrap_or_else(|e| { + 
panic!("Failed to build device tree from DTS files: {e}"); + }); - // Get linker script from environment variable - if let Ok(linker_script) = std::env::var("DEP_HAL_LINKER_SCRIPT") { - println!("cargo::rustc-link-arg=-T{linker_script}"); - } else { - println!("cargo::warning=LD_SCRIPT_PATH environment variable not set."); + if let Err(e) = generate_device_tree(&dt, Path::new(&out_dir)) { + panic!("Failed to generate device tree scripts: {e}"); } cfg_aliases! { @@ -36,16 +38,25 @@ fn main() { // Device Tree Codegen ---------------------------------------------------------------------------- -fn generate_device_tree() -> Result<(), Box> { +fn generate_device_tree(dt: &dtgen::ir::DeviceTree, out: &Path) -> Result<(), Box> { + let rust_content = dtgen::generate_rust(dt); + std::fs::write(out.join("device_tree.rs"), rust_content)?; + + let ld_content = dtgen::generate_ld(dt).map_err(|e| format!("linker script generation failed: {e}"))?; + std::fs::write(out.join("prelude.ld"), ld_content)?; + println!("cargo::rustc-link-search=native={}", out.display()); + Ok(()) +} + +fn build_device_tree(out: &Path) -> Result> { let dts = std::env::var("OSIRIS_TUNING_DTS").unwrap_or_else(|_| "nucleo_l4r5zi.dts".to_string()); - println!("cargo::rerun-if-changed={dts}"); - let dts_path = std::path::Path::new("boards").join(dts); + println!("cargo::rerun-if-changed={}", dts_path.display()); // dependencies SoC/HAL/pins - let zephyr = Path::new(&std::env::var("OUT_DIR").unwrap()).join("zephyr"); - let hal_stm32 = Path::new(&std::env::var("OUT_DIR").unwrap()).join("hal_stm32"); + let zephyr = Path::new(out).join("zephyr"); + let hal_stm32 = Path::new(out).join("hal_stm32"); // clean state if zephyr.exists() { @@ -76,7 +87,7 @@ fn generate_device_tree() -> Result<(), Box> { Some(&hal_rev), )?; - let out = Path::new(&std::env::var("OUT_DIR").unwrap()).join("device_tree.rs"); + //let out = Path::new(&std::env::var("OUT_DIR").unwrap()).join("device_tree.rs"); let include_paths = [ 
zephyr.join("include"), zephyr.join("dts/arm/st"), @@ -96,8 +107,7 @@ fn generate_device_tree() -> Result<(), Box> { } } - dtgen::run(&dts_path, &include_refs, &out)?; - Ok(()) + Ok(dtgen::parse_dts(&dts_path, &include_refs)?) } fn get_hal_revision(zephyr_path: &Path) -> Result> { @@ -165,76 +175,26 @@ fn sparse_clone( // Syscalls --------------------------------------------------------------------------------------- -fn generate_syscalls_export>(root: P) -> Result<(), std::io::Error> { - let syscalls = collect_syscalls_export(root); - - let out_dir = std::env::var("OUT_DIR").unwrap(); - let out_path = Path::new(&out_dir).join("syscalls_export.rs"); - let mut file = File::create(out_path)?; - - writeln!(file, "// This file is @generated by build.rs. Do not edit!")?; +fn gen_syscall_match(root: &Path, out: &Path) -> Result<(), std::io::Error> { + let syscalls = find_syscalls(root); + let mut file = File::create(out.join("syscall_match.in"))?; - for (name, (number, inputs)) in &syscalls { - let mut args = &inputs.iter().fold("".to_owned(), |acc, arg| { - acc + "," + &arg.into_token_stream().to_string() - })[..]; - if !args.is_empty() { - args = &args[1..]; + let arms = syscalls.iter().map(|(name, number)| { + let entry = format_ident!("entry_{}", name); + quote::quote! 
{ + #number => #entry(args), } - let names = get_arg_names(args); - writeln!(file)?; - writeln!(file, "pub fn {name}({args}) {{")?; - writeln!(file, " hal::asm::syscall!({number}{names});")?; - writeln!(file, "}}")?; - } - - Ok(()) -} - -fn get_arg_names(args: &str) -> String { - if args.is_empty() { - return "".to_string(); - } - let mut in_arg_name = true; + }); - ", ".to_owned() - + &args.chars().fold("".to_owned(), |mut acc, char| { - if char.eq(&' ') { - in_arg_name = false; - return acc; - } - if char.eq(&',') { - in_arg_name = true; - return acc + ", "; - } - if in_arg_name { - acc.push(char); - } - acc - }) -} - -fn generate_syscall_map>(root: P) -> Result<(), std::io::Error> { - let syscalls = collect_syscalls(root); - - let out_dir = std::env::var("OUT_DIR").unwrap(); - let out_path = Path::new(&out_dir).join("syscall_dispatcher.in"); - let mut file = File::create(out_path)?; - - writeln!(file, "// This file is @generated by build.rs. Do not edit!")?; - writeln!(file)?; - writeln!(file, "match number {{")?; - - for (name, number) in &syscalls { - writeln!(file, " {number} => entry_{name}(args),")?; - } - - writeln!( - file, - " _ => panic!(\"Unknown syscall number: {{}}\", number)," - )?; - writeln!(file, "}}")?; + let syscall_match = quote::quote! { + // This match statement is @generated by build.rs. Do not edit. 
+ match number { + #(#arms)* + _ => panic!("Unknown syscall number: {}", number), + } + }; + writeln!(file, "{syscall_match}")?; Ok(()) } @@ -274,9 +234,7 @@ fn is_syscall(attrs: &[Attribute], name: &str) -> Option { None } -type SyscallData = u16; - -fn collect_syscalls>(root: P) -> HashMap { +fn find_syscalls(root: &Path) -> HashMap { let mut syscalls = HashMap::new(); let mut numbers = HashMap::new(); @@ -329,59 +287,3 @@ fn collect_syscalls>(root: P) -> HashMap { syscalls } - -type SyscallDataExport = (u16, Punctuated); - -fn collect_syscalls_export>(root: P) -> HashMap { - let mut syscalls = HashMap::new(); - let mut numbers = HashMap::new(); - - for entry in WalkDir::new(&root) { - let entry = match entry { - Ok(entry) => entry, - Err(_) => continue, - }; - - if entry.file_type().is_file() { - let path = entry.path(); - - println!("Processing file: {}", path.display()); - - let contents = match std::fs::read_to_string(path) { - Ok(contents) => contents, - Err(_) => continue, - }; - - let file = match syn::parse_file(&contents) { - Ok(file) => file, - Err(_) => continue, - }; - - for item in file.items { - let item = match item { - syn::Item::Fn(item) => item, - _ => continue, - }; - - let name = item.sig.ident.to_string(); - - if let Some(num) = is_syscall(&item.attrs, &name) { - if syscalls.contains_key(&name) { - println!("cargo:warning=Duplicate syscall handler: {name}"); - continue; - } - - if numbers.contains_key(&num) { - println!("cargo:warning=Duplicate syscall number: {num} for {name}"); - continue; - } - - syscalls.insert(name.clone(), (num, item.sig.inputs)); - numbers.insert(num, name); - } - } - } - } - - syscalls -} diff --git a/config.toml b/config.toml new file mode 100644 index 0000000..0c03ce5 --- /dev/null +++ b/config.toml @@ -0,0 +1,5 @@ +[env] +OSIRIS_STACKPAGES = "1" + +[build] +target = "host-tuple" diff --git a/examples/hello-world/Cargo.toml b/examples/hello-world/Cargo.toml index 8f7248e..a86e875 100644 --- 
a/examples/hello-world/Cargo.toml +++ b/examples/hello-world/Cargo.toml @@ -3,8 +3,26 @@ name = "hello-world" version = "0.1.0" edition = "2024" +[[bin]] +name = "hello-world" +path = "src/main.rs" +test = false +bench = false +doctest = false + [dependencies] -osiris = { path = "../../" } +osiris = { workspace = true } [build-dependencies] cfg_aliases = "0.2.1" + +[profile.dev] +panic = "abort" +strip = false +opt-level = 2 + +[profile.release] +panic = "abort" +opt-level = "z" +codegen-units = 1 +lto = true diff --git a/examples/hello-world/src/main.rs b/examples/hello-world/src/main.rs index 351dd79..4bad2e0 100644 --- a/examples/hello-world/src/main.rs +++ b/examples/hello-world/src/main.rs @@ -1,13 +1,29 @@ #![no_std] #![no_main] -#[unsafe(no_mangle)] -extern "C" fn main() { - osiris::syscall_print(0, "Hello World!".as_bytes().as_ptr(), 12); +use osiris::app_main; + +extern "C" fn second_thread() { + let mut time = osiris::uapi::time::tick(); + let mut cnt = 0; + loop { + time += 100; + osiris::uprintln!("Number: {}", cnt); + cnt += 1; + osiris::uapi::sched::sleep(time); + } } -#[cfg(freestanding)] -#[panic_handler] -fn panic(_info: &core::panic::PanicInfo) -> ! { - loop {} +#[app_main] +fn main() { + osiris::uprintln!("Hello World!"); + let mut tick = 0; + let attrs = osiris::uapi::sched::RtAttrs { deadline: 100, period: 100, budget: 100 }; + + osiris::uapi::sched::spawn_thread(second_thread, Some(attrs)); + loop { + osiris::uprintln!("Tick: {}", tick); + tick += 1; + osiris::uapi::sched::sleep_for(1000); + } } diff --git a/interface/Cargo.lock b/interface/Cargo.lock deleted file mode 100644 index adeff39..0000000 --- a/interface/Cargo.lock +++ /dev/null @@ -1,473 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 4 - -[[package]] -name = "anstream" -version = "0.6.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" -dependencies = [ - "anstyle", - "anstyle-parse", - "anstyle-query", - "anstyle-wincon", - "colorchoice", - "is_terminal_polyfill", - "utf8parse", -] - -[[package]] -name = "anstyle" -version = "1.0.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" - -[[package]] -name = "anstyle-parse" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" -dependencies = [ - "utf8parse", -] - -[[package]] -name = "anstyle-query" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" -dependencies = [ - "windows-sys", -] - -[[package]] -name = "anstyle-wincon" -version = "3.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" -dependencies = [ - "anstyle", - "once_cell_polyfill", - "windows-sys", -] - -[[package]] -name = "bitflags" -version = "2.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" - -[[package]] -name = "bytemuck" -version = "1.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" -dependencies = [ - "bytemuck_derive", -] - -[[package]] -name = "bytemuck_derive" -version = "1.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" -dependencies = [ - 
"proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "cbindgen" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eadd868a2ce9ca38de7eeafdcec9c7065ef89b42b32f0839278d55f35c54d1ff" -dependencies = [ - "clap", - "heck", - "indexmap", - "log", - "proc-macro2", - "quote", - "serde", - "serde_json", - "syn", - "tempfile", - "toml", -] - -[[package]] -name = "cfg-if" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" - -[[package]] -name = "cfg_aliases" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" - -[[package]] -name = "clap" -version = "4.5.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" -dependencies = [ - "clap_builder", -] - -[[package]] -name = "clap_builder" -version = "4.5.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" -dependencies = [ - "anstream", - "anstyle", - "clap_lex", - "strsim", -] - -[[package]] -name = "clap_lex" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" - -[[package]] -name = "colorchoice" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" - -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - -[[package]] -name = "errno" -version = "0.3.14" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" -dependencies = [ - "libc", - "windows-sys", -] - -[[package]] -name = "fastrand" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" - -[[package]] -name = "getrandom" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" -dependencies = [ - "cfg-if", - "libc", - "r-efi", - "wasip2", -] - -[[package]] -name = "hashbrown" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "indexmap" -version = "2.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "interface" -version = "0.1.0" -dependencies = [ - "bytemuck", - "cbindgen", - "cfg_aliases", -] - -[[package]] -name = "is_terminal_polyfill" -version = "1.70.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" - -[[package]] -name = "itoa" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" - -[[package]] -name = "libc" -version = "0.2.180" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" - -[[package]] -name = "linux-raw-sys" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" - -[[package]] -name = "log" -version = "0.4.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" - -[[package]] -name = "memchr" -version = "2.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" - -[[package]] -name = "once_cell" -version = "1.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" - -[[package]] -name = "once_cell_polyfill" -version = "1.70.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" - -[[package]] -name = "proc-macro2" -version = "1.0.105" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "quote" -version = "1.0.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "r-efi" -version = "5.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" - -[[package]] -name = "rustix" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" -dependencies = [ - "bitflags", - "errno", - "libc", - 
"linux-raw-sys", - "windows-sys", -] - -[[package]] -name = "serde" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" -dependencies = [ - "serde_core", - "serde_derive", -] - -[[package]] -name = "serde_core" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.228" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.149" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" -dependencies = [ - "itoa", - "memchr", - "serde", - "serde_core", - "zmij", -] - -[[package]] -name = "serde_spanned" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" -dependencies = [ - "serde", -] - -[[package]] -name = "strsim" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" - -[[package]] -name = "syn" -version = "2.0.114" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "tempfile" -version = "3.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" -dependencies = [ - "fastrand", 
- "getrandom", - "once_cell", - "rustix", - "windows-sys", -] - -[[package]] -name = "toml" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", -] - -[[package]] -name = "toml_datetime" -version = "0.6.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" -dependencies = [ - "serde", -] - -[[package]] -name = "toml_edit" -version = "0.22.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" -dependencies = [ - "indexmap", - "serde", - "serde_spanned", - "toml_datetime", - "toml_write", - "winnow", -] - -[[package]] -name = "toml_write" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" - -[[package]] -name = "unicode-ident" -version = "1.0.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" - -[[package]] -name = "utf8parse" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" - -[[package]] -name = "wasip2" -version = "1.0.1+wasi-0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" -dependencies = [ - "wit-bindgen", -] - -[[package]] -name = "windows-link" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" - -[[package]] -name = "windows-sys" -version = "0.61.2" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" -dependencies = [ - "windows-link", -] - -[[package]] -name = "winnow" -version = "0.7.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" -dependencies = [ - "memchr", -] - -[[package]] -name = "wit-bindgen" -version = "0.46.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" - -[[package]] -name = "zmij" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fc5a66a20078bf1251bde995aa2fdcc4b800c70b5d92dd2c62abc5c60f679f8" diff --git a/interface/Cargo.toml b/interface/Cargo.toml deleted file mode 100644 index a435764..0000000 --- a/interface/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "interface" -version = "0.1.0" -edition = "2024" - -[dependencies] -bytemuck = { version = "1.24.0", features = ["derive"] } - - -[build-dependencies] -cfg_aliases = "0.2.1" -cbindgen = "0.28.0" diff --git a/interface/build.rs b/interface/build.rs deleted file mode 100644 index 2dd23a2..0000000 --- a/interface/build.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::env; - -use cfg_aliases::cfg_aliases; - -fn main() { - cfg_aliases! 
{ - freestanding: { all(not(test), not(doctest), not(doc), not(kani), any(target_os = "none", target_os = "unknown")) }, - } - - generate_c_api(); -} - -fn generate_c_api() { - let crate_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); - - let config: cbindgen::Config = cbindgen::Config { - no_includes: true, - includes: vec![ - "stdint.h".to_string(), - "stdbool.h".to_string(), - "stdarg.h".to_string(), - ], - layout: cbindgen::LayoutConfig { - packed: Some("__attribute__((packed))".to_string()), - ..Default::default() - }, - language: cbindgen::Language::C, - cpp_compat: false, - ..Default::default() - }; - - cbindgen::Builder::new() - .with_crate(crate_dir) - .with_config(config) - .generate() - .map_or_else( - |error| match error { - cbindgen::Error::ParseSyntaxError { .. } => {} - e => panic!("{e:?}"), - }, - |bindings| { - bindings.write_to_file("include/bindings.h"); - }, - ); -} diff --git a/interface/include/bindings.h b/interface/include/bindings.h deleted file mode 100644 index dd7b765..0000000 --- a/interface/include/bindings.h +++ /dev/null @@ -1,76 +0,0 @@ -#include "stdint.h" -#include "stdbool.h" -#include "stdarg.h" - -#define BOOT_INFO_MAGIC 221566477 - -/** - * The memory map entry type. - * - * This structure shall be compatible with the multiboot_memory_map_t struct at - * Link: [https://www.gnu.org/software/grub/manual/multiboot/multiboot.html]() - */ -typedef struct __attribute__((packed)) MemMapEntry { - /** - * The size of the entry. - */ - uint32_t size; - /** - * The base address of the memory region. - */ - uint64_t addr; - /** - * The length of the memory region. - */ - uint64_t length; - /** - * The type of the memory region. - */ - uint32_t ty; -} MemMapEntry; - -typedef struct InitDescriptor { - /** - * Pointer to the start of the binary of the init program. - */ - uint64_t begin; - /** - * Length of the binary of the init program. 
- */ - uint64_t len; - uint64_t entry_offset; -} InitDescriptor; - -typedef struct Args { - struct InitDescriptor init; -} Args; - -/** - * The boot information structure. - */ -typedef struct BootInfo { - /** - * The magic number that indicates valid boot information. - */ - uint32_t magic; - /** - * The version of the boot information structure. - */ - uint32_t version; - /** - * The implementer of the processor. - * The variant of the processor. - * The memory map. - */ - struct MemMapEntry mmap[8]; - /** - * The length of the memory map. - */ - uint64_t mmap_len; - /** - * The command line arguments. - */ - struct Args args; -} BootInfo; - -extern void kernel_init(const struct BootInfo *boot_info); diff --git a/interface/src/lib.rs b/interface/src/lib.rs deleted file mode 100644 index ad78f0a..0000000 --- a/interface/src/lib.rs +++ /dev/null @@ -1,82 +0,0 @@ -#![cfg_attr(freestanding, no_std)] - -/// The memory map entry type. -/// -/// This structure shall be compatible with the multiboot_memory_map_t struct at -/// Link: [https://www.gnu.org/software/grub/manual/multiboot/multiboot.html]() -#[repr(packed, C)] -#[derive(Debug, Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)] -pub struct MemMapEntry { - /// The size of the entry. - pub size: u32, - /// The base address of the memory region. - pub addr: u64, - /// The length of the memory region. - pub length: u64, - /// The type of the memory region. 
- pub ty: u32, -} - -#[cfg(kani)] -impl kani::Arbitrary for MemMapEntry { - fn any() -> Self { - let size: u32 = kani::any_where(|&x| x % size_of::() as u32 == 0); - let length = kani::any(); - let addr = kani::any(); - - kani::assume(addr > 0); - - MemMapEntry { - size, - addr, - length, - ty: kani::any(), - } - } - - fn any_array() -> [Self; MAX_ARRAY_LENGTH] { - [(); MAX_ARRAY_LENGTH].map(|_| Self::any()) - } -} - -#[repr(C)] -#[derive(Debug, Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)] -pub struct InitDescriptor { - /// Pointer to the start of the binary of the init program. - pub begin: u64, - /// Length of the binary of the init program. - pub len: u64, - pub entry_offset: u64, -} - -#[repr(C)] -#[derive(Debug, Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)] -pub struct Args { - pub init: InitDescriptor, -} - -pub const BOOT_INFO_MAGIC: u32 = 0xD34D60D; - -/// The boot information structure. -#[repr(C)] -#[derive(Debug, Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)] -pub struct BootInfo { - /// The magic number that indicates valid boot information. - pub magic: u32, - /// The version of the boot information structure. - pub version: u32, - /// The implementer of the processor. - //pub implementer: u64, - /// The variant of the processor. - //pub variant: u64, - /// The memory map. - pub mmap: [MemMapEntry; 8], - /// The length of the memory map. - pub mmap_len: u64, - /// The command line arguments. 
- pub args: Args, -} - -unsafe extern "C" { - pub fn kernel_init(boot_info: *const BootInfo) -> !; -} diff --git a/justfile b/justfile index c1d3dd4..b14c0c4 100644 --- a/justfile +++ b/justfile @@ -10,7 +10,6 @@ pack *args: example name *args: (build args) cargo build -p {{name}} {{args}} - cargo xtask pack --output {{name}}.bin --init examples/{{name}} {{args}} fmt *args: cargo fmt {{args}} diff --git a/machine/api/src/lib.rs b/machine/api/src/lib.rs index a6330a6..f4ebc52 100644 --- a/machine/api/src/lib.rs +++ b/machine/api/src/lib.rs @@ -2,6 +2,7 @@ use core::{fmt::Display, ops::Range}; +pub mod mem; pub mod stack; #[derive(Default, Debug, PartialEq, Eq, Clone)] @@ -10,6 +11,7 @@ pub enum Error { Generic, OutOfMemory(usize), OutOfBoundsPtr(usize, Range), + InvalidAddress(usize), } pub enum Fault { @@ -30,7 +32,8 @@ impl Display for Error { "Pointer {:p} out of bounds (expected in {:p}..{:p})", *ptr as *const u8, range.start as *const u8, range.end as *const u8 ) - } + }, + Error::InvalidAddress(addr) => write!(f, "Invalid address {:p}", *addr as *const u8), } } } @@ -44,6 +47,11 @@ pub trait Machinelike { fn bench_start(); fn bench_end() -> (u32, f32); + fn monotonic_now() -> u64; + fn monotonic_freq() -> u64; + // Returns the frequency of the machine's systick timer in Hz. 
+ fn systick_freq() -> u64; + type ExcepBacktrace: Display; type ExcepStackFrame: Display; fn backtrace(initial_fp: *const usize, stack_ptr: *const usize) -> Self::ExcepBacktrace; diff --git a/machine/api/src/mem.rs b/machine/api/src/mem.rs new file mode 100644 index 0000000..d3a6223 --- /dev/null +++ b/machine/api/src/mem.rs @@ -0,0 +1,146 @@ +use core::{fmt::{Display, LowerHex, UpperHex}, ops::{Add, Div, Rem, Sub}, ptr::NonNull}; + +#[repr(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)] +pub struct PhysAddr(usize); + +impl PhysAddr { + pub const MAX: Self = Self(usize::MAX); + + #[inline] + pub fn new(addr: usize) -> Self { + Self(addr) + } + + #[inline] + pub fn as_usize(&self) -> usize { + self.0 + } + + pub fn as_mut_ptr(&self) -> *mut T { + self.0 as *mut T + } + + pub fn checked_add(&self, other: usize) -> Option { + self.0.checked_add(other).map(Self) + } + + pub fn checked_sub(&self, other: usize) -> Option { + self.0.checked_sub(other).map(Self) + } + + pub fn is_multiple_of(&self, align: usize) -> bool { + self.0.is_multiple_of(align) + } + + pub fn diff(&self, other: Self) -> usize { + if self.0 >= other.0 { + // Cannot underflow because of the check above. + self.0.checked_sub(other.0).unwrap() + } else { + // Cannot underflow because of the check above. 
+ other.0.checked_sub(self.0).unwrap() + } + } +} + +impl From> for PhysAddr { + #[inline] + fn from(ptr: NonNull) -> Self { + Self(ptr.as_ptr() as usize) + } +} + +impl Display for PhysAddr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "0x{:x}", self.0) + } +} + +impl Add for PhysAddr { + type Output = Self; + + #[inline] + fn add(self, rhs: usize) -> Self::Output { + Self(self.0 + rhs) + } +} + +impl Sub for PhysAddr { + type Output = Self; + + #[inline] + fn sub(self, rhs: usize) -> Self::Output { + Self(self.0 - rhs) + } +} + +impl Div for PhysAddr { + type Output = Self; + + #[inline] + fn div(self, rhs: usize) -> Self::Output { + Self(self.0 / rhs) + } +} + +impl Rem for PhysAddr { + type Output = Self; + + #[inline] + fn rem(self, rhs: usize) -> Self::Output { + Self(self.0 % rhs) + } +} + +impl From for usize { + #[inline] + fn from(addr: PhysAddr) -> Self { + addr.0 + } +} + +impl LowerHex for PhysAddr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{:x}", self.0) + } +} + +impl UpperHex for PhysAddr { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{:X}", self.0) + } +} + +#[repr(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)] +pub struct VirtAddr(usize); + +impl VirtAddr { + #[inline] + pub fn new(addr: usize) -> Self { + Self(addr) + } + + #[inline] + pub fn as_usize(&self) -> usize { + self.0 + } + + #[inline] + pub fn saturating_add(&self, other: usize) -> Self { + Self(self.0.saturating_add(other)) + } + + #[inline] + pub fn saturating_sub(&self, other: usize) -> Self { + Self(self.0.saturating_sub(other)) + } +} + +impl From for usize { + #[inline] + fn from(addr: VirtAddr) -> Self { + addr.0 + } +} \ No newline at end of file diff --git a/machine/api/src/stack.rs b/machine/api/src/stack.rs index db226a0..11604f1 100644 --- a/machine/api/src/stack.rs +++ b/machine/api/src/stack.rs @@ -1,19 +1,21 @@ 
-use core::{ffi::c_void, num::NonZero, ptr::NonNull}; +use core::{ffi::c_void, num::NonZero}; +use crate::{Result, mem::PhysAddr}; -use crate::Result; +pub type EntryFn = extern "C" fn(); +pub type FinFn = extern "C" fn() -> !; -pub struct StackDescriptor { - pub top: NonNull, +pub struct Descriptor { + pub top: PhysAddr, pub size: NonZero, - pub entry: extern "C" fn(), - pub fin: Option !>, + pub entry: EntryFn, + pub fin: Option, } pub trait Stacklike { type ElemSize: Copy; type StackPtr; - unsafe fn new(desc: StackDescriptor) -> Result + unsafe fn new(desc: Descriptor) -> Result where Self: Sized; diff --git a/machine/arm/Cargo.toml b/machine/arm/Cargo.toml index 6aef529..bafea9d 100644 --- a/machine/arm/Cargo.toml +++ b/machine/arm/Cargo.toml @@ -1,19 +1,16 @@ [package] name = "hal-arm" version = "0.1.0" -rust-version = "1.85.0" authors = ["Thomas Wachter"] edition = "2024" build = "build.rs" -# Through this we can access env variables set by the build script through DEP_HAL_ -# It has nothing to do with native libraries. 
-links = "halarm" [lib] crate-type = ["rlib"] [dependencies] hal-api = { path = "../api" } +critical-section = { version = "1.2", features = ["restore-state-usize"] } [build-dependencies] cbindgen = "0.28.0" diff --git a/machine/arm/build.rs b/machine/arm/build.rs index 3ff8569..57df5f2 100644 --- a/machine/arm/build.rs +++ b/machine/arm/build.rs @@ -303,13 +303,12 @@ fn merge_compile_commands(files: &[String]) -> String { /// # Returns /// /// PathBuf pointing to the workspace root directory -fn workspace_dir() -> PathBuf { +fn workspace_dir() -> Option { let output = Command::new("cargo") .args(["locate-project", "--workspace", "--message-format=plain"]) - .output() - .expect("failed to run cargo locate-project"); + .output().ok()?; let path = String::from_utf8(output.stdout).expect("utf8"); - PathBuf::from(path.trim()).parent().unwrap().to_path_buf() + Some(PathBuf::from(path.trim()).parent()?.to_path_buf()) } /// Main build script entry point. @@ -337,7 +336,8 @@ fn workspace_dir() -> PathBuf { /// /// Exits with error code 1 if any critical build step fails fn main() { - let out = env::var("OUT_DIR").unwrap_or("src".to_string()); + let out = env::var("OUT_DIR").unwrap(); + println!("cargo::rustc-link-search={out}"); let hal = fail_on_error(env::var("OSIRIS_ARM_HAL").with_context( || "OSIRIS_ARM_HAL environment variable not set. 
Please set it to the path of the ARM HAL.", @@ -363,7 +363,7 @@ fn main() { let libhal = libhal_config.build(); println!("cargo::rustc-link-search=native={}", libhal.display()); - println!("cargo::metadata=linker-script={out}/link.ld"); + println!("cargo::rerun-if-changed={out}/link.ld"); // Extract compile commands for HAL let hal_cc = build_dir.join("compile_commands.json"); @@ -387,8 +387,10 @@ fn main() { // Merge and export compile_commands.json for IDE integration let merged = merge_compile_commands(&[hal_cc, common_cc]); - let project_root = workspace_dir(); - let out_file = project_root.join("compile_commands.json"); - - fs::write(out_file, merged).expect("write merged compile_commands.json"); + if let Some(project_root) = workspace_dir() { + let out_file = project_root.join("compile_commands.json"); + fs::write(out_file, merged).expect("write merged compile_commands.json"); + } else { + println!("cargo::warning=Could not determine workspace root, skipping compile_commands.json generation."); + } } diff --git a/machine/arm/common/CMakeLists.txt b/machine/arm/common/CMakeLists.txt index c9111b7..0346c0d 100644 --- a/machine/arm/common/CMakeLists.txt +++ b/machine/arm/common/CMakeLists.txt @@ -28,18 +28,11 @@ foreach(var ${_cache_vars}) endif() endforeach() -# This sets up PIC for .data/.bss access by making all accesses relative to r9. -# r9 is initialized in crt0.S to point to the base of the .data section. -# We need this because the offset between .text and .data is not known at compile time. 
(relocatable) -add_compile_options(-msingle-pic-base -mpic-register=r9 -mno-pic-data-is-text-relative) - set_property(SOURCE ivt.S APPEND PROPERTY COMPILE_OPTIONS "-x" "assembler-with-cpp") set_property(SOURCE irq.S APPEND PROPERTY COMPILE_OPTIONS "-x" "assembler-with-cpp") -set_property(SOURCE crt0.S APPEND PROPERTY COMPILE_OPTIONS "-fno-pic") add_library(common STATIC ivt.S - entry.c syscall.c irq.S crt0.S diff --git a/machine/arm/common/crt0.S b/machine/arm/common/crt0.S index 215c85e..ff93ecd 100644 --- a/machine/arm/common/crt0.S +++ b/machine/arm/common/crt0.S @@ -10,13 +10,6 @@ .thumb_func .global bootstrap bootstrap: - /* - * Initialize r9 to point to the start of the .data section. - * This is needed because all .data/.bss accesses are relative to r9. - * We need this because the offset between .text and .data is not known at compile time (relocatable). - */ - ldr r9, =__data_start - @ Copy initialized data from flash to RAM. ldr r0, =__data ldr r1, =__data_start @@ -35,7 +28,7 @@ bootstrap: strlt r3, [r1], #4 blt 2b @ Call the pre_init function. - bl pre_init + bl kernel_init @ If main returns, loop forever. 
hang: b hang \ No newline at end of file diff --git a/machine/arm/common/entry.c b/machine/arm/common/entry.c deleted file mode 100644 index 52dad42..0000000 --- a/machine/arm/common/entry.c +++ /dev/null @@ -1,50 +0,0 @@ - -#include -#include "mem.h" - -#include - -typedef void (*func_t)(void); - -extern func_t __init_array_start; -extern func_t __init_array_end; -extern func_t __fini_array_start; -extern func_t __fini_array_end; - -extern void pre_init(void) __attribute__((noreturn)); -extern void init_mem_maps(BootInfo *boot_info); - -__attribute__((section(".bootinfo"), used, aligned(4))) -static BootInfo _boot_info = { - .magic = BOOT_INFO_MAGIC, - .version = 1, - .mmap = {0}, - .mmap_len = 0, - .args = {.init = {0}}, -}; - -void call_constructors(void) -{ - for (func_t *func = &__init_array_start; func < &__init_array_end; func++) - { - (*func)(); - } -} - -void call_destructors(void) -{ - for (func_t *func = &__fini_array_start; func < &__fini_array_end; func++) - { - (*func)(); - } -} - -void pre_init(void) -{ - // Init memory maps, etc. - init_mem_maps(&_boot_info); - - // Boot! - kernel_init(&_boot_info); - unreachable(); -} diff --git a/machine/arm/src/asm.rs b/machine/arm/src/asm.rs index 9f12f75..1ba1ebb 100644 --- a/machine/arm/src/asm.rs +++ b/machine/arm/src/asm.rs @@ -20,33 +20,84 @@ pub use crate::__macro_nop as nop; #[macro_export] macro_rules! 
__macro_syscall { ($num:expr) => { - use core::arch::asm; - unsafe { - asm!("svc {0}", const $num); + { + use core::arch::asm; + let ret: isize; + unsafe { + asm!( + "svc {num}", + lateout("r0") ret, + num = const $num, + clobber_abi("C") + ); + } + ret } }; ($num:expr, $arg0:expr) => { - use core::arch::asm; - unsafe { - asm!("mov r0, {0}", "svc {1}", in(reg)$arg0, const $num); + { + use core::arch::asm; + let ret: isize; + unsafe { + asm!( + "svc {num}", + inlateout("r0") $arg0 => ret, + num = const $num, + clobber_abi("C") + ); + } + ret } }; ($num:expr, $arg0:expr, $arg1:expr) => { - use core::arch::asm; - unsafe { - asm!("mov r0, {0}", "mov r1, {1}", "svc {2}", in(reg)$arg0, in(reg)$arg1, const $num); + { + use core::arch::asm; + let ret: isize; + unsafe { + asm!( + "svc {num}", + inlateout("r0") $arg0 => ret, + in("r1") $arg1, + num = const $num, + clobber_abi("C") + ); + } + ret } }; ($num:expr, $arg0:expr, $arg1:expr, $arg2:expr) => { - use core::arch::asm; - unsafe { - asm!("mov r0, {0}", "mov r1, {1}", "mov r2, {2}", "svc {3}", in(reg)$arg0, in(reg)$arg1, in(reg)$arg2, const $num); + { + use core::arch::asm; + let ret: isize; + unsafe { + asm!( + "svc {num}", + inlateout("r0") $arg0 => ret, + in("r1") $arg1, + in("r2") $arg2, + num = const $num, + clobber_abi("C") + ); + } + ret } }; ($num:expr, $arg0:expr, $arg1:expr, $arg2:expr, $arg3:expr) => { - use core::arch::asm; - unsafe { - asm!("mov r0, {0}", "mov r1, {1}", "mov r2, {2}", "mov r3, {3}", "svc {4}", in(reg)$arg0, in(reg)$arg1, in(reg)$arg2, in(reg)$arg3, const $num); + { + use core::arch::asm; + let ret: isize; + unsafe { + asm!( + "svc {num}", + inlateout("r0") $arg0 => ret, + in("r1") $arg1, + in("r2") $arg2, + in("r3") $arg3, + num = const $num, + clobber_abi("C") + ); + } + ret } }; } @@ -54,28 +105,39 @@ macro_rules! __macro_syscall { #[cfg(feature = "host")] #[macro_export] macro_rules! 
__macro_syscall { - ($num:expr) => {{}}; - ($num:expr, $arg0:expr) => {{}}; - ($num:expr, $arg0:expr, $arg1:expr) => {{}}; - ($num:expr, $arg0:expr, $arg1:expr, $arg2:expr) => {{}}; - ($num:expr, $arg0:expr, $arg1:expr, $arg2:expr, $arg3:expr) => {{}}; + ($num:expr) => {{ 0isize }}; + ($num:expr, $arg0:expr) => {{ 0isize }}; + ($num:expr, $arg0:expr, $arg1:expr) => {{ 0isize }}; + ($num:expr, $arg0:expr, $arg1:expr, $arg2:expr) => {{ 0isize }}; + ($num:expr, $arg0:expr, $arg1:expr, $arg2:expr, $arg3:expr) => {{ 0isize }}; } pub use crate::__macro_syscall as syscall; #[cfg(not(feature = "host"))] #[inline(always)] -pub fn disable_interrupts() { +pub fn disable_irq_save() -> usize { use core::arch::asm; - use core::sync::atomic::compiler_fence; - unsafe { asm!("cpsid i", options(nomem, nostack, preserves_flags)) }; - compiler_fence(core::sync::atomic::Ordering::SeqCst); + let old: usize; + + unsafe { + asm!( + "mrs {old}, primask", + "cpsid i", + "isb", + old = out(reg) old, + options(nostack, preserves_flags) + ); + } + old } #[cfg(feature = "host")] #[inline(always)] -pub fn disable_interrupts() {} +pub fn disable_irq_save() -> usize { + 0 +} #[cfg(not(feature = "host"))] #[inline(always)] @@ -97,17 +159,23 @@ pub fn are_interrupts_enabled() -> bool { #[cfg(not(feature = "host"))] #[inline(always)] -pub fn enable_interrupts() { +pub fn enable_irq_restr(state: usize) { use core::arch::asm; - use core::sync::atomic::compiler_fence; - - unsafe { asm!("cpsie i", options(nomem, nostack, preserves_flags)) }; - compiler_fence(core::sync::atomic::Ordering::SeqCst); + + unsafe { + asm!( + "dsb", + "msr primask, {state}", + "isb", + state = in(reg) state, + options(nostack, preserves_flags) + ); + } } #[cfg(feature = "host")] #[inline(always)] -pub fn enable_interrupts() {} +pub fn enable_irq_restr(state: usize) {} #[cfg(not(feature = "host"))] #[macro_export] diff --git a/machine/arm/src/crit.rs b/machine/arm/src/crit.rs new file mode 100644 index 0000000..134f2f5 --- 
/dev/null +++ b/machine/arm/src/crit.rs @@ -0,0 +1,14 @@ +use critical_section::RawRestoreState; + +struct CriticalSection; +critical_section::set_impl!(CriticalSection); + +unsafe impl critical_section::Impl for CriticalSection { + unsafe fn acquire() -> RawRestoreState { + crate::asm::disable_irq_save() + } + + unsafe fn release(token: RawRestoreState) { + crate::asm::enable_irq_restr(token); + } +} \ No newline at end of file diff --git a/machine/arm/src/excep.rs b/machine/arm/src/excep.rs index 4e50bbc..ab51268 100644 --- a/machine/arm/src/excep.rs +++ b/machine/arm/src/excep.rs @@ -1,4 +1,5 @@ use core::fmt::Display; +use core::mem::align_of; #[repr(C)] pub struct ExcepStackFrame { @@ -43,6 +44,11 @@ impl Display for ExcepStackFrame { const BACKTRACE_MAX_FRAMES: usize = 20; +#[inline] +fn is_call_aligned(ptr: *const usize) -> bool { + (ptr as usize).is_multiple_of(align_of::()) +} + #[repr(C)] pub struct ExcepBacktrace { stack_frame: ExcepStackFrame, @@ -79,6 +85,11 @@ impl Display for ExcepBacktrace { writeln!(f, "0: 0x{:08x}", self.stack_frame.pc)?; } + if fp.is_null() || !is_call_aligned(fp) { + writeln!(f, "", fp as usize)?; + return writeln!(f); + } + for i in 1..BACKTRACE_MAX_FRAMES { // Read the return address from the stack. let ret_addr = unsafe { fp.add(1).read_volatile() }; @@ -89,6 +100,9 @@ impl Display for ExcepBacktrace { break; } + // Return addresses in Thumb mode carry bit0 = 1. + let ret_addr = ret_addr & !1; + // Print the return address. if let Some(symbol) = crate::debug::find_nearest_symbol(ret_addr) { writeln!(f, "{i}: {symbol} (0x{ret_addr:08x})")?; @@ -101,6 +115,19 @@ impl Display for ExcepBacktrace { break; } + let fp_addr = fp as usize; + let next_fp_addr = next_fp; + + if next_fp_addr <= fp_addr { + writeln!(f, "")?; + break; + } + + if !is_call_aligned(next_fp_addr as *const usize) { + writeln!(f, "")?; + break; + } + // Move to the next frame. 
fp = next_fp as *const usize; diff --git a/machine/arm/src/lib.rs b/machine/arm/src/lib.rs index 389d14b..46ba0b9 100644 --- a/machine/arm/src/lib.rs +++ b/machine/arm/src/lib.rs @@ -10,6 +10,7 @@ pub mod excep; pub mod panic; pub mod sched; +mod crit; mod print; mod bindings { @@ -40,14 +41,14 @@ impl hal_api::Machinelike for ArmMachine { fn print(s: &str) -> Result<()> { use crate::asm; - asm::disable_interrupts(); + let state = asm::disable_irq_save(); if (unsafe { bindings::write_debug_uart(s.as_ptr() as *const c_char, s.len() as i32) } != 0) { - asm::enable_interrupts(); + asm::enable_irq_restr(state); Ok(()) } else { - asm::enable_interrupts(); + asm::enable_irq_restr(state); Err(hal_api::Error::default()) } } @@ -65,6 +66,18 @@ impl hal_api::Machinelike for ArmMachine { (cycles as u32, ns) } + fn monotonic_now() -> u64 { + unsafe { bindings::monotonic_now() } + } + + fn monotonic_freq() -> u64 { + unsafe { bindings::monotonic_freq() } + } + + fn systick_freq() -> u64 { + unsafe { bindings::systick_freq() } + } + type ExcepBacktrace = excep::ExcepBacktrace; type ExcepStackFrame = excep::ExcepStackFrame; diff --git a/machine/arm/src/panic.rs b/machine/arm/src/panic.rs index 3d6dfbc..946ec21 100644 --- a/machine/arm/src/panic.rs +++ b/machine/arm/src/panic.rs @@ -6,9 +6,7 @@ use core::panic::PanicInfo; use crate::asm; pub fn panic_handler(_info: &PanicInfo) -> ! 
{ - asm::disable_interrupts(); - - loop { - core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst); - } + asm::disable_irq_save(); + #[allow(clippy::empty_loop)] + loop {} } diff --git a/machine/arm/src/sched.rs b/machine/arm/src/sched.rs index 18b463f..9521f33 100644 --- a/machine/arm/src/sched.rs +++ b/machine/arm/src/sched.rs @@ -7,7 +7,7 @@ use core::{ ptr::NonNull, }; -use hal_api::{Result, stack::StackDescriptor}; +use hal_api::{Result, stack::Descriptor}; use crate::print::println; @@ -55,7 +55,7 @@ impl Add for StackPtr { } /// A stack on arm is 4 byte aligned and grows downwards. -#[derive(Debug)] +#[derive(Debug, Clone, Copy)] pub struct ArmStack { /// The top of the stack (highest address). /// Safety: NonNull can safely be covariant over u32. @@ -72,7 +72,7 @@ impl ArmStack { } fn is_call_aligned(sp: StackPtr) -> bool { - (sp.offset % 2) == 0 + sp.offset.is_multiple_of(2) } fn in_bounds(&self, sp: *mut u32) -> Option { @@ -191,17 +191,20 @@ impl hal_api::stack::Stacklike for ArmStack { type ElemSize = u32; type StackPtr = StackPtr; - unsafe fn new(desc: StackDescriptor) -> Result + unsafe fn new(desc: Descriptor) -> Result where Self: Sized, { - let StackDescriptor { + let Descriptor { top, size, entry, fin, } = desc; + // We expect a PhysAddr, which can be converted to a ptr on nommu. + let top = NonNull::new(top.as_mut_ptr::()).ok_or(hal_api::Error::InvalidAddress(top.as_usize()))?; + let mut stack = Self { top, sp: StackPtr { offset: 0 }, diff --git a/machine/arm/stm32l4xx/CMakeLists.txt b/machine/arm/stm32l4xx/CMakeLists.txt index 569d51b..9410027 100644 --- a/machine/arm/stm32l4xx/CMakeLists.txt +++ b/machine/arm/stm32l4xx/CMakeLists.txt @@ -30,11 +30,6 @@ foreach(var ${_cache_vars}) endif() endforeach() -# This sets up PIC for .data/.bss access by making all accesses relative to r9. -# r9 is initialized in crt0.S to point to the base of the .data section. 
-# We need this because the offset between .text and .data is not known at compile time. (relocatable) -add_compile_options(-msingle-pic-base -mpic-register=r9 -mno-pic-data-is-text-relative) - # this will compile our variant_stm32l4xx library add_subdirectory(${OSIRIS_ARM_STM32L4XX_VARIANT}) @@ -49,13 +44,12 @@ set(LINKER_SCRIPT_OUT ${OUT_DIR}/link.ld) # We track environment variables that start with OSIRIS_ as dependencies as they can change the output of the compilation set(DEPS_FILE ${OUT_DIR}/deps.txt) -file(WRITE ${DEPS_FILE} "${CONFIG_DEFINES}") +file(GENERATE OUTPUT ${DEPS_FILE} CONTENT "${CONFIG_DEFINES}") add_custom_command( OUTPUT ${LINKER_SCRIPT_OUT} COMMAND ${CMAKE_C_COMPILER} -E -P -x c ${CONFIG_DEFINES} - -DMCU_HEADER=\"${OSIRIS_ARM_STM32L4XX_VARIANT}/link.ld\" ${LINKER_SCRIPT_IN} -o ${LINKER_SCRIPT_OUT} DEPENDS ${LINKER_SCRIPT_IN} ${DEPS_FILE} diff --git a/machine/arm/stm32l4xx/interface/clock.c b/machine/arm/stm32l4xx/interface/clock.c new file mode 100644 index 0000000..4110c3c --- /dev/null +++ b/machine/arm/stm32l4xx/interface/clock.c @@ -0,0 +1,130 @@ +#include "lib.h" +#include + +static volatile uint64_t monotonic_hi = 0; + +static void init_monotonic_timer(void) +{ + const uint32_t target_hz = 1000000U; + uint32_t tim_clk = HAL_RCC_GetPCLK1Freq(); + + monotonic_hi = 0; + + // If APB1 prescaler is not 1, timer clocks run at 2x PCLK1. + if ((RCC->CFGR & RCC_CFGR_PPRE1) != RCC_CFGR_PPRE1_DIV1) { + tim_clk *= 2U; + } + + const uint32_t prescaler = (tim_clk / target_hz) - 1U; + + __HAL_RCC_TIM2_CLK_ENABLE(); + __HAL_RCC_TIM2_FORCE_RESET(); + __HAL_RCC_TIM2_RELEASE_RESET(); + + HAL_NVIC_DisableIRQ(TIM2_IRQn); + NVIC_ClearPendingIRQ(TIM2_IRQn); + + // URS ensures update flags/interrupts are only from real overflows. + TIM2->CR1 = TIM_CR1_URS; + TIM2->PSC = prescaler; + TIM2->ARR = 0xFFFFFFFFU; + TIM2->CNT = 0; + TIM2->EGR = TIM_EGR_UG; + + // Clear pending flags and enable update interrupt for wrap extension. 
+ TIM2->SR = 0; + TIM2->DIER = TIM_DIER_UIE; + + HAL_NVIC_SetPriority(TIM2_IRQn, 15, 0); + HAL_NVIC_EnableIRQ(TIM2_IRQn); + + TIM2->CR1 |= TIM_CR1_CEN; + + // Clear any latent startup update state before first read. + TIM2->SR = 0; + NVIC_ClearPendingIRQ(TIM2_IRQn); +} + +void tim2_hndlr(void) +{ + if ((TIM2->SR & TIM_SR_UIF) != 0U) { + TIM2->SR &= ~TIM_SR_UIF; + monotonic_hi += (1ULL << 32); + } +} + +void init_clock_cfg(void) +{ + RCC_OscInitTypeDef RCC_OscInitStruct = {0}; + RCC_ClkInitTypeDef RCC_ClkInitStruct = {0}; + + /* 80 MHz on STM32L4+ => Range 1 normal mode, not boost */ + __HAL_RCC_PWR_CLK_ENABLE(); + + if (HAL_PWREx_ControlVoltageScaling(PWR_REGULATOR_VOLTAGE_SCALE1) != HAL_OK) { + while (1) {} + } + + /* HSI16 -> PLL -> 80 MHz SYSCLK */ + RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSI; + RCC_OscInitStruct.HSIState = RCC_HSI_ON; + RCC_OscInitStruct.HSICalibrationValue = RCC_HSICALIBRATION_DEFAULT; + + RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON; + RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSI; + RCC_OscInitStruct.PLL.PLLM = 1; + RCC_OscInitStruct.PLL.PLLN = 10; + RCC_OscInitStruct.PLL.PLLR = RCC_PLLR_DIV2; + RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV7; // arbitrary unless you use PLLP + RCC_OscInitStruct.PLL.PLLQ = RCC_PLLQ_DIV2; // arbitrary unless you use PLLQ + + if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) { + while (1) {} + } + + RCC_ClkInitStruct.ClockType = + RCC_CLOCKTYPE_SYSCLK | + RCC_CLOCKTYPE_HCLK | + RCC_CLOCKTYPE_PCLK1 | + RCC_CLOCKTYPE_PCLK2; + + RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK; + RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1; + RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV1; + RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV1; + + if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_4) != HAL_OK) { + while (1) {} + } + + SystemCoreClockUpdate(); + init_monotonic_timer(); +} + +unsigned long long monotonic_now(void) +{ + uint64_t hi_1; + uint64_t hi_2; + uint32_t lo; + uint32_t 
sr; + + // Retry if the overflow IRQ updates the high word while sampling. + do { + hi_1 = monotonic_hi; + lo = TIM2->CNT; + sr = TIM2->SR; + hi_2 = monotonic_hi; + } while (hi_1 != hi_2); + + // If overflow is pending but IRQ has not run yet, include that wrap. + if ((sr & TIM_SR_UIF) != 0U && lo < 0x80000000U) { + hi_1 += (1ULL << 32); + } + + return hi_1 | (uint64_t)lo; +} + +unsigned long long monotonic_freq(void) +{ + return 1000000ULL; +} \ No newline at end of file diff --git a/machine/arm/stm32l4xx/interface/export.h b/machine/arm/stm32l4xx/interface/export.h index ce3e398..9d5910b 100644 --- a/machine/arm/stm32l4xx/interface/export.h +++ b/machine/arm/stm32l4xx/interface/export.h @@ -1,6 +1,7 @@ #pragma once // lib.c +unsigned long long systick_freq(void); void init_hal(void); // uart.c @@ -16,3 +17,9 @@ void dwt_reset(void); long dwt_read(void); float dwt_read_ns(void); float dwt_cycles_to_ns(long cycles); + +// clock.c +void SystemClock_Config(void); + +unsigned long long monotonic_now(void); +unsigned long long monotonic_freq(void); diff --git a/machine/arm/stm32l4xx/interface/lib.c b/machine/arm/stm32l4xx/interface/lib.c index 188119c..41319fa 100644 --- a/machine/arm/stm32l4xx/interface/lib.c +++ b/machine/arm/stm32l4xx/interface/lib.c @@ -15,11 +15,14 @@ static void enable_faults(void) { } static void init_systick(void) { - HAL_SYSTICK_Config(SystemCoreClock / - 10); // Configure SysTick to interrupt every 1 ms + HAL_SYSTICK_Config(SystemCoreClock / 1000); // Configure SysTick to interrupt every 1 ms HAL_SYSTICK_CLKSourceConfig(SYSTICK_CLKSOURCE_HCLK); } +unsigned long long systick_freq(void) { + return 1000; +} + void init_hal(void) { #if OSIRIS_TUNING_ENABLEFPU init_fpu(); @@ -28,6 +31,7 @@ void init_hal(void) { enable_faults(); + init_clock_cfg(); init_systick(); } diff --git a/machine/arm/stm32l4xx/interface/lib.h b/machine/arm/stm32l4xx/interface/lib.h index 7b9637e..c66f171 100644 --- a/machine/arm/stm32l4xx/interface/lib.h +++ 
b/machine/arm/stm32l4xx/interface/lib.h @@ -1 +1,3 @@ -#pragma once \ No newline at end of file +#pragma once + +void init_clock_cfg(void); \ No newline at end of file diff --git a/machine/arm/stm32l4xx/interface/sched.c b/machine/arm/stm32l4xx/interface/sched.c index a301d50..8e90155 100644 --- a/machine/arm/stm32l4xx/interface/sched.c +++ b/machine/arm/stm32l4xx/interface/sched.c @@ -6,4 +6,5 @@ void reschedule(void) { SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk; // Trigger PendSV exception __ISB(); __DSB(); -} \ No newline at end of file +} + diff --git a/machine/arm/stm32l4xx/link.ld b/machine/arm/stm32l4xx/link.ld index c964b11..ef1eae1 100644 --- a/machine/arm/stm32l4xx/link.ld +++ b/machine/arm/stm32l4xx/link.ld @@ -1,11 +1,6 @@ -#ifndef MCU_HEADER -#error "MCU_HEADER is not defined." -#endif - -#include MCU_HEADER - __ram_start = ORIGIN(RAM); __ram_end = ORIGIN(RAM) + LENGTH(RAM); +__stack_size = 0x8000; #if OSIRIS_DEBUG_RUNTIMESYMBOLS /* at least 250kb for the symbol table */ @@ -39,15 +34,6 @@ SECTIONS KEEP(*(.ivt.ext)); } > FLASH :text - .stack (NOLOAD) : - { - . = ALIGN(4); - __stack_start = .; - . = . + __stack_size; - . = ALIGN(4); - __stack_top = .; - } > RAM - .text : { *(.text .text.* .gnu.linkonce.t*) @@ -130,6 +116,15 @@ SECTIONS __bss_end = .; } > RAM :data + .stack (NOLOAD) : + { + . = ALIGN(4); + __stack_start = .; + . = . + __stack_size; + . 
= ALIGN(4); + __stack_top = .; + } > RAM + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) } } diff --git a/machine/arm/stm32l4xx/r5zi/lib.c b/machine/arm/stm32l4xx/r5zi/lib.c index 877422d..83c8135 100644 --- a/machine/arm/stm32l4xx/r5zi/lib.c +++ b/machine/arm/stm32l4xx/r5zi/lib.c @@ -1,6 +1,4 @@ #include - -#include #include /* @@ -197,31 +195,3 @@ const uintptr_t vector_table_ext[] __attribute__((section(".ivt.ext"))) = { (uintptr_t)&gfxmmu_hndlr, (uintptr_t)&dmamux1_ovr_hndlr, }; - -void init_mem_maps(BootInfo *boot_info) { - boot_info->mmap_len = 3; - - // SRAM1 - boot_info->mmap[0] = (MemMapEntry){ - .size = sizeof(MemMapEntry), - .addr = 0x20000000, - .length = 0x30000, - .ty = 1, - }; - - // SRAM2 - boot_info->mmap[1] = (MemMapEntry){ - .size = sizeof(MemMapEntry), - .addr = 0x20030000, - .length = 0x10000, - .ty = 1, - }; - - // SRAM3 - boot_info->mmap[2] = (MemMapEntry){ - .size = sizeof(MemMapEntry), - .addr = 0x20040000, - .length = 0x60000, - .ty = 1, - }; -} diff --git a/machine/arm/stm32l4xx/r5zi/link.ld b/machine/arm/stm32l4xx/r5zi/link.ld deleted file mode 100644 index 06a4b1f..0000000 --- a/machine/arm/stm32l4xx/r5zi/link.ld +++ /dev/null @@ -1,7 +0,0 @@ -MEMORY -{ - FLASH (rx) : ORIGIN = 0x08000000, LENGTH = 2M - RAM (rwx) : ORIGIN = 0x20000000, LENGTH = 640K -} - -__stack_size = 0x8000; \ No newline at end of file diff --git a/machine/select/Cargo.toml b/machine/select/Cargo.toml index 4d068cc..1bb1694 100644 --- a/machine/select/Cargo.toml +++ b/machine/select/Cargo.toml @@ -2,7 +2,6 @@ name = "hal-select" version = "0.1.0" edition = "2024" -links = "hal" [dependencies] hal-api = { path = "../api" } diff --git a/machine/select/build.rs b/machine/select/build.rs index d7c04ac..78d1124 100644 --- a/machine/select/build.rs +++ b/machine/select/build.rs @@ -8,13 +8,6 @@ fn main() { } } - // Pass linker script to top level - if let Ok(linker_script) = std::env::var("DEP_HALARM_LINKER_SCRIPT") { - 
println!("cargo::metadata=linker-script={linker_script}"); - } else { - println!("cargo::warning=LD_SCRIPT_PATH environment variable not set."); - } - cfg_aliases! { freestanding: { all(not(test), not(doctest), not(doc), not(kani), any(target_os = "none", target_os = "unknown")) }, } diff --git a/machine/testing/src/asm.rs b/machine/testing/src/asm.rs index 9322be2..fbd7dd5 100644 --- a/machine/testing/src/asm.rs +++ b/machine/testing/src/asm.rs @@ -11,17 +11,19 @@ pub use crate::__macro_nop as nop; /// Macro for doing a system call. #[macro_export] macro_rules! __macro_syscall { - ($num:expr) => {{}}; - ($num:expr, $arg0:expr) => {{}}; - ($num:expr, $arg0:expr, $arg1:expr) => {{}}; - ($num:expr, $arg0:expr, $arg1:expr, $arg2:expr) => {{}}; - ($num:expr, $arg0:expr, $arg1:expr, $arg2:expr, $arg3:expr) => {{}}; + ($num:expr) => {{ 0isize }}; + ($num:expr, $arg0:expr) => {{ 0isize }}; + ($num:expr, $arg0:expr, $arg1:expr) => {{ 0isize }}; + ($num:expr, $arg0:expr, $arg1:expr, $arg2:expr) => {{ 0isize }}; + ($num:expr, $arg0:expr, $arg1:expr, $arg2:expr, $arg3:expr) => {{ 0isize }}; } pub use crate::__macro_syscall as syscall; #[inline(always)] -pub fn disable_interrupts() {} +pub fn disable_irq_save() -> usize { + 0 +} #[inline(always)] pub fn are_interrupts_enabled() -> bool { @@ -29,7 +31,7 @@ pub fn are_interrupts_enabled() -> bool { } #[inline(always)] -pub fn enable_interrupts() {} +pub fn enable_irq_restr(state: usize) {} #[macro_export] macro_rules! 
__macro_startup_trampoline { diff --git a/machine/testing/src/lib.rs b/machine/testing/src/lib.rs index 4236f4f..4477ff1 100644 --- a/machine/testing/src/lib.rs +++ b/machine/testing/src/lib.rs @@ -1,3 +1,6 @@ +#![cfg_attr(target_os = "none", no_std)] +#![cfg(not(target_os = "none"))] + use core::result::Result::Ok; use hal_api::{Result, Schedable}; @@ -26,6 +29,18 @@ impl hal_api::Machinelike for TestingMachine { (0, 0.0) } + fn monotonic_now() -> u64 { + 0 + } + + fn monotonic_freq() -> u64 { + 0 + } + + fn systick_freq() -> u64 { + 0 + } + type ExcepBacktrace = String; type ExcepStackFrame = String; diff --git a/machine/testing/src/sched.rs b/machine/testing/src/sched.rs index 7bf874d..ced7100 100644 --- a/machine/testing/src/sched.rs +++ b/machine/testing/src/sched.rs @@ -2,17 +2,17 @@ use std::ffi::c_void; use hal_api::{ Result, - stack::{StackDescriptor, Stacklike}, + stack::{Descriptor, Stacklike}, }; -#[derive(Debug)] +#[derive(Debug, Clone, Copy)] pub struct TestingStack {} impl Stacklike for TestingStack { type ElemSize = usize; type StackPtr = *mut c_void; - unsafe fn new(_desc: StackDescriptor) -> Result + unsafe fn new(_desc: Descriptor) -> Result where Self: Sized, { diff --git a/macros/src/lib.rs b/macros/src/lib.rs index 903945a..49f1b4a 100644 --- a/macros/src/lib.rs +++ b/macros/src/lib.rs @@ -1,124 +1,54 @@ -use quote::{ToTokens, format_ident}; use syn::parse_macro_input; -use proc_macro2::TokenStream; +mod tree; +mod syscall; +mod logging; -#[proc_macro_attribute] -pub fn service( - attr: proc_macro::TokenStream, - item: proc_macro::TokenStream, -) -> proc_macro::TokenStream { - // This macro should be used to annotate a service struct. 
- let item = syn::parse_macro_input!(item as syn::ItemStruct); - - let service_name = item.ident.clone(); +#[proc_macro_derive(TaggedLinks, attributes(rbtree, list))] +pub fn derive_tagged_links(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let input = syn::parse_macro_input!(input as syn::DeriveInput); - let mut mem_size: usize = 0; - let mut stack_size: usize = 0; + match tree::derive_tagged_links(&input) { + Ok(tokens) => tokens, + Err(e) => e.to_compile_error(), + }.into() +} - let parser = syn::meta::parser(|meta| { - if meta.path.is_ident("mem_size") { - mem_size = meta.value()?.parse::()?.base10_parse()?; - Ok(()) - } else if meta.path.is_ident("stack_size") { - stack_size = meta.value()?.parse::()?.base10_parse()?; - Ok(()) - } else { - Err(meta.error("unknown attribute")) - } - }); +#[proc_macro_attribute] +pub fn fmt(args: proc_macro::TokenStream, input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let input = syn::parse_macro_input!(input as syn::DeriveInput); - parse_macro_input!(attr with parser); + match logging::derive_fmt(&input) { + Ok(tokens) => tokens, + Err(e) => e.to_compile_error(), + }.into() +} - let mem_size_ident = format_ident!("TASK_{}_MEM_SIZE", service_name.to_string().to_uppercase()); - let stack_size_ident = format_ident!( - "TASK_{}_STACK_SIZE", - service_name.to_string().to_uppercase() - ); +#[proc_macro_attribute] +pub fn app_main(input: proc_macro::TokenStream, item: proc_macro::TokenStream) -> proc_macro::TokenStream { + let item = syn::parse_macro_input!(item as syn::ItemFn); + let block = &item.block; let expanded = quote::quote! 
{ - const #mem_size_ident: usize = #mem_size; - const #stack_size_ident: usize = #stack_size; - #item - - impl #service_name { - pub fn task_desc() -> crate::sched::task::TaskDescriptor { - crate::sched::task::TaskDescriptor { - mem_size: #mem_size_ident, - } - } + #[unsafe(no_mangle)] + #[unsafe(naked)] + extern "C" fn main() { + osiris::hal::asm::startup_trampoline!(); } - }; - expanded.into() -} + #[cfg(freestanding)] + #[panic_handler] + fn panic(info: &core::panic::PanicInfo) -> ! { + osiris::panic(info); + } -const SYSCALL_MAX_ARGS: usize = 4; - -fn is_return_type_register_sized_check( - item: &syn::ItemFn, -) -> Result { - let ret_ty = match &item.sig.output { - syn::ReturnType::Default => { - // no "-> Type" present - return Err(syn::Error::new_spanned( - &item.sig.output, - "syscall_handler: missing return type; expected a register‐sized type", - )); + #[unsafe(no_mangle)] + pub extern "C" fn app_main() -> () { + #block } - syn::ReturnType::Type(_, ty) => (*ty).clone(), }; - Ok(quote::quote! { - const _: () = { - if core::mem::size_of::<#ret_ty>() > core::mem::size_of::() { - panic!("syscall_handler: the return type is bigger than usize. return type must fit in a register."); - } - }; - }) -} - -fn check_and_collect_argument_types(item: &syn::ItemFn) -> Result, syn::Error> { - let types: Vec> = item - .sig - .inputs - .iter() - .map(|arg| { - if let syn::FnArg::Typed(pat_type) = arg { - Ok((*pat_type.ty).clone()) - } else { - Err(syn::Error::new( - item.sig.ident.span(), - format!( - "argument {} is invalid. 
expected a typed argument.\n", - arg.to_token_stream() - ), - )) - } - }) - .collect(); - - let concat_errors: Vec<_> = types - .iter() - .filter_map(|arg0: &std::result::Result| Result::err(arg0.clone())) - .collect(); - - if !concat_errors.is_empty() { - return Err(syn::Error::new( - item.sig.ident.span(), - format!( - "syscall_handler: function {} has invalid arguments: {}", - item.sig.ident, - concat_errors - .iter() - .map(|e| e.to_string()) - .collect::>() - .join(", ") - ), - )); - } - - Ok(types.into_iter().map(Result::unwrap).collect()) + expanded.into() } #[proc_macro_attribute] @@ -140,83 +70,7 @@ pub fn syscall_handler( parse_macro_input!(attr with parser); let item = syn::parse_macro_input!(item as syn::ItemFn); - syscall_handler_fn(&item).into() + syscall::syscall_handler_fn(&item).into() } -fn syscall_handler_fn(item: &syn::ItemFn) -> TokenStream { - let name = item.sig.ident.to_string().to_uppercase(); - let num_args = item.sig.inputs.len(); - - // Check if the function has a valid signature. So args <= 4 and return type is u32. - if num_args > SYSCALL_MAX_ARGS { - return syn::Error::new( - item.sig.ident.span(), - format!( - "syscall_handler: function {name} has too many arguments (max is {SYSCALL_MAX_ARGS})" - ), - ) - .to_compile_error(); - } - - let ret_check = match is_return_type_register_sized_check(item) { - Ok(check) => check, - Err(e) => return e.to_compile_error(), - }; - - let types = match check_and_collect_argument_types(item) { - Ok(types) => { - if types.len() > SYSCALL_MAX_ARGS { - return syn::Error::new( - item.sig.ident.span(), - format!( - "syscall_handler: function {name} has too many arguments (max is {SYSCALL_MAX_ARGS})" - ), - ) - .to_compile_error(); - } - types - } - Err(e) => return e.to_compile_error(), - }; - - // Check if each argument type is valid and fits in a register. - let size_checks: Vec = types.iter().map(|ty| { - quote::quote! 
{ - const _: () = { - if core::mem::size_of::<#ty>() > core::mem::size_of::() { - panic!("syscall_handler: an argument type is bigger than usize. arguments must fit in a register."); - } - }; - } - }).collect(); - - let unpack = types.iter().enumerate().map(|(i, ty)| { - quote::quote! { - unsafe { *(args.add(#i) as *const #ty) } - } - }); - - let wrapper_name = format_ident!("entry_{}", item.sig.ident.clone()); - let func_name = item.sig.ident.clone(); - - let call = quote::quote! { - #func_name( #(#unpack),* ) - }; - let wrapper = quote::quote! { - #[unsafe(no_mangle)] - pub extern "C" fn #wrapper_name(svc_args: *const core::ffi::c_uint) -> core::ffi::c_int { - // This function needs to extract the arguments from the pointer and call the original function by passing the arguments as actual different parameters. - let args = unsafe { svc_args as *const usize }; - // Call the original function with the extracted arguments. - #call - } - }; - - quote::quote! { - #wrapper - #item - #ret_check - #(#size_checks)* - } -} diff --git a/macros/src/logging.rs b/macros/src/logging.rs new file mode 100644 index 0000000..9e857fe --- /dev/null +++ b/macros/src/logging.rs @@ -0,0 +1,24 @@ +use syn::{DeriveInput, ItemFn}; + +pub fn derive_fmt(input: &DeriveInput) -> syn::Result { + // Check if the env variable "OSIRIS_DEBUG_DEFMT" is set. If it is, generate a defmt::Format implementation. Otherwise, generate a Debug implementation. + if std::env::var("OSIRIS_DEBUG_DEFMT").is_ok() { + Ok(derive_fmt_defmt(input)) + } else { + Ok(derive_fmt_debug(input)) + } +} + +fn derive_fmt_defmt(input: &DeriveInput) -> proc_macro2::TokenStream { + quote::quote! { + #[derive(defmt::Format)] + #input + } +} + +fn derive_fmt_debug(input: &DeriveInput) -> proc_macro2::TokenStream { + quote::quote! 
{ + #[derive(core::fmt::Debug)] + #input + } +} \ No newline at end of file diff --git a/macros/src/syscall.rs b/macros/src/syscall.rs new file mode 100644 index 0000000..45c071c --- /dev/null +++ b/macros/src/syscall.rs @@ -0,0 +1,146 @@ +use quote::{ToTokens, format_ident}; +use proc_macro2::TokenStream; + +pub const MAX_ARGS: usize = 4; + +pub fn valid_ret_type_check(item: &syn::ItemFn) -> Result { + let ret_ty = match &item.sig.output { + syn::ReturnType::Default => { + // no "-> Type" present + return Err(syn::Error::new_spanned( + &item.sig.output, + "syscall_handler: missing return type; expected a register‐sized type", + )); + } + syn::ReturnType::Type(_, ty) => (*ty).clone(), + }; + + Ok(quote::quote! { + const _: () = { + if core::mem::size_of::<#ret_ty>() > core::mem::size_of::() { + panic!("syscall_handler: the return type is bigger than usize. return type must fit in a register."); + } + }; + }) +} + +pub fn valid_arg_types_check(item: &syn::ItemFn) -> Result, syn::Error> { + let types: Vec> = item + .sig + .inputs + .iter() + .map(|arg| { + if let syn::FnArg::Typed(pat_type) = arg { + Ok((*pat_type.ty).clone()) + } else { + Err(syn::Error::new( + item.sig.ident.span(), + format!( + "argument {} is invalid. 
expected a typed argument.\n", + arg.to_token_stream() + ), + )) + } + }) + .collect(); + + let concat_errors: Vec<_> = types + .iter() + .filter_map(|arg0: &std::result::Result| Result::err(arg0.clone())) + .collect(); + + if !concat_errors.is_empty() { + return Err(syn::Error::new( + item.sig.ident.span(), + format!( + "syscall_handler: function {} has invalid arguments: {}", + item.sig.ident, + concat_errors + .iter() + .map(|e| e.to_string()) + .collect::>() + .join(", ") + ), + )); + } + + Ok(types.into_iter().map(Result::unwrap).collect()) +} + +pub fn syscall_handler_fn(item: &syn::ItemFn) -> TokenStream { + let name = item.sig.ident.to_string().to_uppercase(); + let num_args = item.sig.inputs.len(); + + // Check if the function has a valid signature. So args <= 4 and return type is u32. + if num_args > MAX_ARGS { + return syn::Error::new( + item.sig.ident.span(), + format!( + "syscall_handler: function {name} has too many arguments (max is {MAX_ARGS})", + ), + ) + .to_compile_error(); + } + + let ret_check = match valid_ret_type_check(item) { + Ok(check) => check, + Err(e) => return e.to_compile_error(), + }; + + let types = match valid_arg_types_check(item) { + Ok(types) => { + if types.len() > MAX_ARGS { + return syn::Error::new( + item.sig.ident.span(), + format!( + "syscall_handler: function {name} has too many arguments (max is {MAX_ARGS})", + ), + ) + .to_compile_error(); + } + types + } + Err(e) => return e.to_compile_error(), + }; + + // Check if each argument type is valid and fits in a register. + let size_checks: Vec = types.iter().map(|ty| { + quote::quote! { + const _: () = { + if core::mem::size_of::<#ty>() > core::mem::size_of::() { + panic!("syscall_handler: an argument type is bigger than usize. arguments must fit in a register."); + } + }; + } + }).collect(); + + let unpack = types.iter().enumerate().map(|(i, ty)| { + quote::quote! 
{ + unsafe { *(args.add(#i) as *const #ty) } + } + }); + + let wrapper_name = format_ident!("entry_{}", item.sig.ident.clone()); + let func_name = item.sig.ident.clone(); + + let call = quote::quote! { + #func_name( #(#unpack),* ) + }; + + let wrapper = quote::quote! { + #[unsafe(no_mangle)] + pub extern "C" fn #wrapper_name(svc_args: *const core::ffi::c_uint) -> core::ffi::c_int { + // This function needs to extract the arguments from the pointer and call the original function by passing the arguments as actual different parameters. + let args = unsafe { svc_args as *const usize }; + // Call the original function with the extracted arguments. + #call + } + }; + + quote::quote! { + #wrapper + #item + #ret_check + #(#size_checks)* + } +} diff --git a/macros/src/tree.rs b/macros/src/tree.rs new file mode 100644 index 0000000..9a37950 --- /dev/null +++ b/macros/src/tree.rs @@ -0,0 +1,126 @@ +use quote::quote; +use syn::{ + spanned::Spanned, Data, DeriveInput, Error, Fields, Path, +}; + +pub fn derive_tagged_links(input: &DeriveInput) -> syn::Result { + let fields = match &input.data { + Data::Struct(ds) => match &ds.fields { + Fields::Named(named) => &named.named, + _ => { + return Err(Error::new( + ds.fields.span(), + "TaggedLinks only supports structs with named fields", + )) + } + }, + _ => { + return Err(Error::new( + input.span(), + "TaggedLinks can only be derived for structs", + )) + } + }; + + let rbtree_impls = impl_rbtree(input, fields)?; + let list_impls = impl_list(input, fields)?; + + Ok(quote! { + #rbtree_impls + #list_impls + }) +} + +fn impl_rbtree(input: &DeriveInput, fields: &syn::punctuated::Punctuated) -> syn::Result { + let struct_ident = &input.ident; + let generics = &input.generics; + + let mut impls = Vec::new(); + + for field in fields { + let Some(field_ident) = field.ident.clone() else { continue }; + + if let (Some(tag_path), Some(idx_path)) = find_tagged(&field.attrs, "rbtree")? 
{ + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + let impl_block = quote! { + impl #impl_generics crate::types::rbtree::Linkable<#tag_path, #idx_path> for #struct_ident #ty_generics #where_clause { + #[inline] + fn links(&self) -> &crate::types::rbtree::Links<#tag_path, #idx_path> { + &self.#field_ident + } + #[inline] + fn links_mut(&mut self) -> &mut crate::types::rbtree::Links<#tag_path, #idx_path> { + &mut self.#field_ident + } + } + }; + + impls.push(impl_block); + } + } + + Ok(quote! { #(#impls)* }) +} + +fn impl_list(input: &DeriveInput, fields: &syn::punctuated::Punctuated) -> syn::Result { + let struct_ident = &input.ident; + let generics = &input.generics; + + let mut impls = Vec::new(); + + for field in fields { + let Some(field_ident) = field.ident.clone() else { continue }; + + if let (Some(tag_path), Some(idx_path)) = find_tagged(&field.attrs, "list")? { + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + let impl_block = quote! { + impl #impl_generics crate::types::list::Linkable<#tag_path, #idx_path> for #struct_ident #ty_generics #where_clause { + #[inline] + fn links(&self) -> &crate::types::list::Links<#tag_path, #idx_path> { + &self.#field_ident + } + #[inline] + fn links_mut(&mut self) -> &mut crate::types::list::Links<#tag_path, #idx_path> { + &mut self.#field_ident + } + } + }; + + impls.push(impl_block); + } + } + + Ok(quote! 
{ #(#impls)* }) +} + +fn find_tagged(attrs: &[syn::Attribute], attr_name: &str) -> syn::Result<(Option, Option)> { + for attr in attrs { + if !attr.path().is_ident(attr_name) { + continue; + } + + let mut tag: Option = None; + let mut idx: Option = None; + + attr.parse_nested_meta(|meta| { + if meta.path.is_ident("tag") { + let value = meta.value()?; // expects '=' + let p: Path = value.parse()?; + tag = Some(p); + Ok(()) + } else if meta.path.is_ident("idx") { + let value = meta.value()?; // expects '=' + let p: Path = value.parse()?; + idx = Some(p); + Ok(()) + } else { + Err(meta.error("expected `tag = SomePath` or `idx = SomePath`")) + } + })?; + + return Ok((tag, idx)); + } + Ok((None, None)) +} diff --git a/options.toml b/options.toml index d7bf1ce..d292dd3 100644 --- a/options.toml +++ b/options.toml @@ -26,20 +26,14 @@ description = "Enables the Floating Point Unit (FPU). This is required for appli type = "Boolean" default = false -[tuning.appmemsize] -name = "Application Memory Size" -description = "Sets the size of the initial memory region for the init application. This memory is used for the heap and stack." -type = { type = "Integer", min = 0 } -default = 8192 - -[tuning.appstacksize] -name = "Application Stack Size" -description = "Sets the size of the stack for the init application. This must be less than the application memory size." -type = { type = "Integer", min = 0 } -default = 2048 - [tuning.dts] name = "Device Tree Source" description = "Board DTS file targeted to build OS for. Relative to the toplevel boards/ directory." type = "String" default = "nucleo_l4r5zi.dts" + +[stackpages] +name = "Stack Pages" +description = "Number of pages to allocate for the kernel stack." 
+type = { type = "Integer", min = 1 } +default = 1 diff --git a/presets/stm32l4r5zi_def.toml b/presets/stm32l4r5zi_def.toml index c5f9166..fc6a853 100644 --- a/presets/stm32l4r5zi_def.toml +++ b/presets/stm32l4r5zi_def.toml @@ -11,17 +11,7 @@ OSIRIS_DEBUG_RUNTIMESYMBOLS = "false" # Tuning parameters OSIRIS_TUNING_ENABLEFPU = "false" - -OSIRIS_TUNING_APPSTACKSIZE = "2048" -OSIRIS_TUNING_APPMEMSIZE = "8192" +OSIRIS_STACKPAGES = "1" [build] -target = "thumbv7em-none-eabi" - -[target.'cfg(target_os = "none")'] -rustflags = [ - "-C", "link-arg=--entry=main", -] - -[target.thumbv7em-none-eabi] -rustflags = ["-C", "relocation-model=ropi-rwpi"] \ No newline at end of file +target = "thumbv7em-none-eabi" \ No newline at end of file diff --git a/presets/testing_def.toml b/presets/testing_def.toml index 1ed804d..59b9c04 100644 --- a/presets/testing_def.toml +++ b/presets/testing_def.toml @@ -1,6 +1,7 @@ # This is the default configuration for running tests and verification. [env] +OSIRIS_STACKPAGES = "1" [build] target = "host-tuple" \ No newline at end of file diff --git a/src/error.rs b/src/error.rs new file mode 100644 index 0000000..cc48035 --- /dev/null +++ b/src/error.rs @@ -0,0 +1,179 @@ +//! Utility functions and definitions for the kernel. +#![cfg_attr(feature = "nightly", feature(likely_unlikely))] + +use core::fmt::Display; +use hal::mem::PhysAddr; +use core::fmt::Debug; + +/// These two definitions are copied from https://github.com/rust-lang/hashbrown +#[cfg(not(feature = "nightly"))] +#[allow(unused_imports)] +pub(crate) use core::convert::{identity as likely, identity as unlikely}; + +#[cfg(feature = "nightly")] +pub(crate) use core::hint::{likely, unlikely}; + +pub type Result = core::result::Result; + +/// This is a macro that is used to panic when a bug is detected. +/// It is similar to the BUG() macro in the Linux kernel. Link: [https://www.kernel.org/]() +#[macro_export] +macro_rules! 
bug { + () => { + panic!("BUG at {}:{}", file!(), line!()); + }; + ($fmt:literal $(, $arg:expr)* $(,)?) => {{ + panic!(concat!("BUG at {}:{}: ", $fmt), file!(), line!() $(, $arg)*); + }}; +} + +#[macro_export] +macro_rules! warn { + () => { + kprintln!("WARN at {}:{}", file!(), line!()); + }; + ($fmt:literal $(, $arg:expr)* $(,)?) => {{ + kprintln!(concat!("WARN at {}:{}: ", $fmt), file!(), line!() $(, $arg)*); + }}; +} + +/// This is a macro that is used to panic when a condition is true. +/// It is similar to the BUG_ON() macro in the Linux kernel. Link: [https://www.kernel.org/]() +macro_rules! bug_on { + ($cond:expr) => {{ + let cond = $cond; + #[allow(unused_unsafe)] + if unsafe { $crate::error::unlikely(cond) } { + panic!("BUG({}) at {}:{}", stringify!($cond), file!(), line!()); + } + }}; + ($cond:expr, $fmt:literal $(, $arg:expr)* $(,)?) => {{ + let cond = $cond; + #[allow(unused_unsafe)] + if unsafe { $crate::error::unlikely(cond) } { + panic!(concat!("BUG({}) at {}:{}: ", $fmt), stringify!($cond), file!(), line!() $(, $arg)*); + } + }}; +} + +macro_rules! warn_on { + ($cond:expr) => {{ + let cond = $cond; + #[allow(unused_unsafe)] + if unsafe { $crate::error::unlikely(cond) } { + kprintln!("WARN({}) at {}:{}", stringify!($cond), file!(), line!()); + } + }}; + ($cond:expr, $fmt:literal $(, $arg:expr)* $(,)?) => {{ + let cond = $cond; + #[allow(unused_unsafe)] + if unsafe { $crate::error::unlikely(cond) } { + kprintln!(concat!("WARN({}) at {}:{}: ", $fmt), stringify!($cond), file!(), line!() $(, $arg)*); + } + }}; +} + +macro_rules! 
kerr { + ($kind:ident) => { + $crate::error::Error::new($crate::error::Kind::$kind) + }; + ($kind:expr, $msg:expr) => { + use $crate::error::Error; + #[cfg(feature = "error-msg")] + { + Error::new($crate::error::Kind::$kind).with_msg($msg) + } + #[cfg(not(feature = "error-msg"))] + { + Error::new($crate::error::Kind::$kind) + } + }; +} + +#[proc_macros::fmt] +#[derive(Clone, PartialEq, Eq)] +pub enum Kind { + InvalidAlign, + OutOfMemory, + InvalidSize, + InvalidAddress(PhysAddr), + InvalidArgument, + NotFound, + Hal(hal::Error), +} + +impl Display for Kind { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Kind::InvalidAlign => write!(f, "Invalid alignment"), + Kind::OutOfMemory => write!(f, "Out of memory"), + Kind::InvalidSize => write!(f, "Invalid size"), + Kind::InvalidAddress(addr) => write!(f, "Invalid address: {addr:#x}"), + Kind::InvalidArgument => write!(f, "Invalid argument"), + Kind::NotFound => write!(f, "Not found"), + Kind::Hal(e) => write!(f, "HAL error: {e:?}"), + } + } +} + +pub struct Error { + pub kind: Kind, + #[cfg(feature = "error-msg")] + msg: Option<&'static str>, +} + +impl Error { + pub fn new(kind: Kind) -> Self { + #[cfg(feature = "error-msg")] + { + Self { kind, msg: None } + } + #[cfg(not(feature = "error-msg"))] + { + Self { kind } + } + } + + #[cfg(feature = "error-msg")] + pub fn with_msg(mut self, msg: &'static str) -> Self { + self.msg = Some(msg); + self + } +} + +impl Debug for Error { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + #[cfg(feature = "error-msg")] + { + match self.msg { + Some(msg) => write!(f, "{}: {}", self.kind, msg), + None => write!(f, "{}", self.kind), + } + } + #[cfg(not(feature = "error-msg"))] + { + write!(f, "{}", self.kind) + } + } +} + +impl Display for Error { + #[cfg(not(feature = "error-msg"))] + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.kind) + } + + #[cfg(feature = "error-msg")] 
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self.msg { + Some(msg) => write!(f, "{}: {}", self.kind, msg), + None => write!(f, "{}", self.kind), + } + } +} + +impl From for Error { + fn from(e: hal::Error) -> Self { + Self::new(Kind::Hal(e)) + } +} \ No newline at end of file diff --git a/src/idle.rs b/src/idle.rs new file mode 100644 index 0000000..940605e --- /dev/null +++ b/src/idle.rs @@ -0,0 +1,16 @@ +use crate::sched; + +extern "C" fn entry() { + loop { + hal::asm::nop!(); + } +} + +pub fn init() { + let attrs = sched::thread::Attributes { entry, fin: None, attrs: None }; + sched::with(|sched| { + if let Err(e) = sched.create_thread(Some(sched::task::KERNEL_TASK), &attrs) { + panic!("failed to create idle thread. Error: {}", e); + } + }); +} diff --git a/src/lib.rs b/src/lib.rs index cb40e60..1218892 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,62 +6,75 @@ #[macro_use] mod macros; #[macro_use] -mod utils; +mod error; mod faults; mod mem; -pub mod print; -pub mod sched; -pub mod sync; -pub mod syscalls; -pub mod time; -pub mod uspace; +mod types; +mod idle; +mod uspace; +mod print; + +mod sched; +mod sync; +mod syscalls; +mod time; + +pub mod uapi; use hal::Machinelike; -use interface::BootInfo; -include!(concat!(env!("OUT_DIR"), "/syscalls_export.rs")); include!(concat!(env!("OUT_DIR"), "/device_tree.rs")); +pub use hal; +pub use proc_macros::app_main; + /// The kernel initialization function. /// -/// `boot_info` - The boot information. -/// /// # Safety /// /// This function must be called only once during the kernel startup. -/// The `boot_info` pointer must be valid and point to a properly initialized `BootInfo` structure. #[unsafe(no_mangle)] -pub unsafe extern "C" fn kernel_init(boot_info: *const BootInfo) -> ! { +pub unsafe extern "C" fn kernel_init() -> ! { // Initialize basic hardware and the logging system. 
hal::Machine::init(); hal::Machine::bench_start(); - if boot_info.is_null() || !boot_info.is_aligned() { - panic!("[Kernel] Error: boot_info pointer is null or unaligned."); - } - - // Safety: We trust the bootloader to provide a valid boot_info structure. - let boot_info = unsafe { &*boot_info }; - print::print_header(); + error!("Hello World!"); + // Initialize the memory allocator. - if let Err(e) = mem::init_memory(&device_tree::memory::REGIONS) { - panic!("[Kernel] Error: failed to initialize memory allocator. Error: {e:?}"); - } + let kaddr_space = mem::init_memory(); + kprintln!("Memory initialized."); + + sched::init(kaddr_space); + kprintln!("Scheduler initialized."); + + idle::init(); + kprintln!("Idle thread initialized."); let (cyc, ns) = hal::Machine::bench_end(); kprintln!( - "[Osiris] Kernel init took {} cycles taking {} ms", - cyc, - ns as u32 / 1000000 + "Kernel init took {} cycles.", cyc ); // Start the init application. - if let Err(e) = uspace::init_app(boot_info) { - panic!("[Kernel] Error: failed to start init application. Error: {e:?}"); - } + uspace::init_app(); + + sched::enable(); + + loop {} +} + +pub fn panic(info: &core::panic::PanicInfo) -> ! { + kprintln!("**************************** PANIC ****************************"); + kprintln!(""); + kprintln!("Message: {}", info.message()); - loop { - hal::asm::nop!(); + if let Some(location) = info.location() { + kprintln!("Location: {}:{}", location.file(), location.line()); } + + kprintln!("**************************** PANIC ****************************"); + + hal::Machine::panic_handler(info); } diff --git a/src/macros.rs b/src/macros.rs index 7c3a2e0..78ddb33 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -1,27 +1,41 @@ //! Macros for kernel development. -/// Creates a slice from the raw arguments. #[macro_export] -macro_rules! 
args_from_raw { - ($argc:expr, $argv:expr) => {{ - let argc = $argc; - let argv = $argv; - - if argc == 0 || argv.is_null() { - &[] - } else { - unsafe { core::slice::from_raw_parts(argv, argc) } - } - }}; +macro_rules! debug { + ($fmt:literal $(, $arg:expr)* $(,)?) => { + #[cfg(feature = "defmt")] + defmt::debug!($fmt $(, $arg)*); + }; +} + +#[macro_export] +macro_rules! trace { + ($fmt:literal $(, $arg:expr)* $(,)?) => { + #[cfg(feature = "defmt")] + defmt::trace!($fmt $(, $arg)*); + }; +} + +#[macro_export] +macro_rules! info { + ($fmt:literal $(, $arg:expr)* $(,)?) => { + #[cfg(feature = "defmt")] + defmt::info!($fmt $(, $arg)*); + }; +} + +#[macro_export] +macro_rules! error { + ($fmt:literal $(, $arg:expr)* $(,)?) => { + #[cfg(feature = "defmt")] + defmt::error!($fmt $(, $arg)*); + }; } #[macro_export] macro_rules! kprint { ($($arg:tt)*) => ({ - use core::fmt::Write; - use $crate::print::Printer; - let mut printer = Printer; - printer.write_fmt(format_args!($($arg)*)).unwrap(); + }); } @@ -30,7 +44,15 @@ macro_rules! kprintln { ($($arg:tt)*) => ({ use core::fmt::Write; use $crate::print::Printer; + let mut printer = Printer; + const MICROS_PER_SEC: u64 = 1000000; + let hz = $crate::time::mono_freq(); + let secs = $crate::time::mono_now() / hz; + let rem = $crate::time::mono_now() % hz; + let frac = (rem * MICROS_PER_SEC) / hz; + + write!(&mut printer, "[{}.{:06}] ", secs, frac).unwrap(); printer.write_fmt(format_args!($($arg)*)).unwrap(); printer.write_str("\n").unwrap(); }); diff --git a/src/main.rs b/src/main.rs index ae72d07..5ed2555 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,23 +7,16 @@ pub extern "C" fn main() -> ! { hal::asm::startup_trampoline!(); } +#[unsafe(no_mangle)] +pub extern "C" fn app_main() -> ! { + loop {} +} + /// The panic handler. #[cfg(freestanding)] #[panic_handler] fn panic(info: &core::panic::PanicInfo) -> ! 
{ - use hal::Machinelike; - - osiris::kprintln!("**************************** PANIC ****************************"); - osiris::kprintln!(""); - osiris::kprintln!("Message: {}", info.message()); - - if let Some(location) = info.location() { - osiris::kprintln!("Location: {}:{}", location.file(), location.line()); - } - - osiris::kprintln!("**************************** PANIC ****************************"); - - hal::Machine::panic_handler(info); + osiris::panic(info); } #[cfg(not(freestanding))] diff --git a/src/mem.rs b/src/mem.rs index 897e031..dc1bcdf 100644 --- a/src/mem.rs +++ b/src/mem.rs @@ -1,54 +1,58 @@ //! This module provides access to the global memory allocator. +use crate::mem::pfa::PAGE_SIZE; +use crate::mem::vmm::{AddressSpacelike, Backing, Perms, Region}; use crate::sync::spinlock::SpinLocked; -use crate::{BootInfo, utils}; use alloc::Allocator; +use hal::mem::{PhysAddr}; use core::ptr::NonNull; pub mod alloc; -pub mod array; -pub mod boxed; -pub mod heap; -pub mod pool; -pub mod queue; - -/// The possible types of memory. Which is compatible with the multiboot2 memory map. -/// Link: https://www.gnu.org/software/grub/manual/multiboot/multiboot.html -#[repr(C)] -#[allow(unused)] -enum MemoryTypes { - /// Memory that is available for use. - Available = 1, - /// Memory that is reserved for the system. - Reserved = 2, - /// Memory that is reclaimable after ACPI tables are read. - ACPIReclaimable = 3, - /// ACPI Non-volatile-sleeping memory. - Nvs = 4, - /// Memory that is bad. - BadMemory = 5, +pub mod vmm; +pub mod pfa; + +pub const BITS_PER_PTR: usize = core::mem::size_of::() * 8; + +unsafe extern "C" { + unsafe static __stack_top: u8; } /// The global memory allocator. -static GLOBAL_ALLOCATOR: SpinLocked = - SpinLocked::new(alloc::BestFitAllocator::new()); +static GLOBAL_ALLOCATOR: SpinLocked = + SpinLocked::new(alloc::bestfit::BestFitAllocator::new()); /// Initialize the memory allocator. 
/// /// `regions` - The memory node module of device tree codegen file. /// /// Returns an error if the memory allocator could not be initialized. -pub fn init_memory(regions: &[(&str, usize, usize)]) -> Result<(), utils::KernelError> { - let mut allocator = GLOBAL_ALLOCATOR.lock(); +pub fn init_memory() -> vmm::AddressSpace { + let stack_top = &raw const __stack_top as usize; + if let Err(e) = pfa::init_pfa(PhysAddr::new(stack_top)) { // TODO: Get this from the DeviceTree. + panic!("failed to initialize PFA. Error: {e}"); + } + + // TODO: Configure. + let pgs = 10; + + let mut kaddr_space = vmm::AddressSpace::new(pgs).unwrap_or_else(|e| { + panic!("failed to create kernel address space. Error: {e}"); + }); - for &(_, base, size) in regions { - let range = base..base + size; - unsafe { - allocator.add_range(range)?; + let begin = kaddr_space.map(Region::new(None, 2 * PAGE_SIZE, Backing::Zeroed, Perms::all())).unwrap_or_else(|e| { + panic!("failed to map kernel address space. Error: {e}"); + }); + + { + let mut allocator = GLOBAL_ALLOCATOR.lock(); + + let range = begin..(begin + pgs * PAGE_SIZE); + if let Err(e) = unsafe { allocator.add_range(&range) } { + panic!("failed to add range to allocator. Error: {e}"); } } - Ok(()) + kaddr_space } /// Allocate a memory block. Normally Box or SizedPool should be used instead of this function. @@ -59,7 +63,7 @@ pub fn init_memory(regions: &[(&str, usize, usize)]) -> Result<(), utils::Kernel /// Returns a pointer to the allocated memory block if the allocation was successful, or `None` if the allocation failed. pub fn malloc(size: usize, align: usize) -> Option> { let mut allocator = GLOBAL_ALLOCATOR.lock(); - allocator.malloc(size, align).ok() + allocator.malloc(size, align, None).ok() } /// Free a memory block. 
@@ -88,61 +92,3 @@ pub fn align_up(size: usize) -> usize { let align = align_of::(); (size + align - 1) & !(align - 1) } - -// VERIFICATION ----------------------------------------------------------------------------------- -#[cfg(kani)] -mod verification { - use super::*; - use crate::mem::alloc::MAX_ADDR; - - fn mock_ptr_write(dst: *mut T, src: T) { - // noop - } - - #[kani::proof] - #[kani::stub(core::ptr::write, mock_ptr_write)] - fn proof_init_allocator_good() { - const MAX_REGIONS: usize = 8; - let regions: [(&str, usize, usize); MAX_REGIONS] = - core::array::from_fn(|i| ("dummy", kani::any(), kani::any())); - - // contrain all regions - for &(_, base, size) in regions.iter() { - kani::assume(base % align_of::() == 0); - kani::assume(base > 0); - kani::assume(size > 0); - kani::assume(size < alloc::MAX_ADDR && size > alloc::BestFitAllocator::MIN_RANGE_SIZE); - kani::assume(base < alloc::MAX_ADDR - size); - } - - // for any i, j, i != j as indices into the memory regions the following should hold - let i: usize = kani::any(); - let j: usize = kani::any(); - kani::assume(i < MAX_REGIONS); - kani::assume(j < MAX_REGIONS); - kani::assume(i != j); - - // non-overlapping regions - let (_, base_i, size_i) = regions[i]; - let (_, base_j, size_j) = regions[j]; - kani::assume(base_i + size_i <= base_j || base_j + size_j <= base_i); - - // verify memory init - assert!(init_memory(®ions).is_ok()); - } - - #[kani::proof] - fn check_align_up() { - let size = kani::any(); - kani::assume(size > 0); - - let align = align_up(size); - assert_ne!(align, 0); - - if align != usize::MAX { - assert_eq!(align % align_of::(), 0); - assert!(align >= size); - } - } -} -// END VERIFICATION diff --git a/src/mem/alloc.rs b/src/mem/alloc.rs index 710ad03..c7a7831 100644 --- a/src/mem/alloc.rs +++ b/src/mem/alloc.rs @@ -1,9 +1,13 @@ //! This module provides a simple allocator. //! One implementation is the BestFitAllocator, which uses the best fit strategy. 
-use core::{ops::Range, ptr::NonNull}; +use core::ptr::NonNull; -use crate::{BUG_ON, utils}; +use hal::mem::PhysAddr; + +use crate::error::Result; + +pub mod bestfit; #[cfg(target_pointer_width = "64")] pub const MAX_ADDR: usize = 2_usize.pow(48); @@ -21,579 +25,7 @@ pub const MAX_ADDR: usize = usize::MAX; /// Each range added to the allocator must be valid for the whole lifetime of the allocator and must not overlap with any other range. /// The lifetime of any allocation is only valid as long as the allocator is valid. (A pointer must not be used after the allocator is dropped.) pub trait Allocator { - fn malloc(&mut self, size: usize, align: usize) -> Result, utils::KernelError>; + fn malloc(&mut self, size: usize, align: usize, request: Option) -> Result>; unsafe fn free(&mut self, ptr: NonNull, size: usize); } -/// The metadata that is before any block in the BestFitAllocator. -struct BestFitMeta { - /// The size of the block in bytes. - size: usize, - /// The pointer to the next free block. This is `None` if the block is allocated. - next: Option>, -} - -/// This is an allocator implementation that uses the best fit strategy. -/// That does mean, when we allocate a block, we try to find the smallest block that fits the requested size. -/// Blocks are stored in a singly linked list. The important part is that the linked list is stored in-line with the memory blocks. -/// This means that every block has a header that contains the size of the block and a pointer to the next block. -#[derive(Debug)] -pub struct BestFitAllocator { - /// Head of the free block list. - head: Option>, -} - -/// Implementation of the BestFitAllocator. -impl BestFitAllocator { - pub const MIN_RANGE_SIZE: usize = size_of::() + Self::align_up() + 1; - /// Creates a new BestFitAllocator. - /// - /// Returns the new BestFitAllocator. - pub const fn new() -> Self { - Self { head: None } - } - - /// Adds a range of memory to the allocator. - /// - /// `range` - The range of memory to add. 
- /// - /// Returns `Ok(())` if the range was added successfully, otherwise an error. - /// - /// # Safety - /// - /// The range must be valid, 128bit aligned and must not overlapping with any other current or future range. - /// The range must also be at least as large as `MIN_RANGE_SIZE`. - /// Also the range must stay valid, for the whole lifetime of the allocator. Also the lifetime of any allocation is only valid as long as the allocator is valid. - pub unsafe fn add_range(&mut self, range: Range) -> Result<(), utils::KernelError> { - let ptr = range.start; - - // Check if the pointer is 128bit aligned. - if !ptr.is_multiple_of(align_of::()) { - return Err(utils::KernelError::InvalidAlign); - } - - if ptr == 0 { - return Err(utils::KernelError::InvalidAddress); - } - - debug_assert!(range.end > range.start); - debug_assert!(range.end - range.start > size_of::() + Self::align_up()); - debug_assert!(range.end <= isize::MAX as usize); - - // The user pointer is the pointer to the user memory. So we need to add the size of the meta data and possibly add padding. - let user_pointer = ptr + size_of::() + Self::align_up(); - - // Set the current head as the next block, so we can add the new block to the head. - let meta = BestFitMeta { - size: range.end - user_pointer, - next: self.head, - }; - - // Write the header to the memory. - unsafe { core::ptr::write(ptr as *mut BestFitMeta, meta) }; - - // Set the head to the new block. - self.head = Some(unsafe { NonNull::new_unchecked(ptr as *mut u8) }); - Ok(()) - } - - /// Calculates the padding required to align the block. Note: We only align to 128bit. - /// - /// Returns the padding in bytes. - const fn align_up() -> usize { - let meta = size_of::(); - let align = align_of::(); - // Calculate the padding required to align the block. - (align - (meta % align)) % align - } - - /// Selects the best fit block for the given size. - /// - /// `size` - The size of the block. 
- /// - /// Returns the control pointer to the block and the control pointer to the previous block. - fn select_block( - &mut self, - size: usize, - ) -> Result<(NonNull, Option>), utils::KernelError> { - let mut best_fit = Err(utils::KernelError::OutOfMemory); - let mut best_fit_size = usize::MAX; - - let mut current = self.head; - let mut prev = None; - - // Iterate over all blocks and find the best fit. - while let Some(ptr) = current { - // Get the metadata of the block. - let meta = unsafe { ptr.cast::().as_ref() }; - - // Check if the block is big enough and smaller than the current best fit. - if meta.size >= size && meta.size <= best_fit_size { - best_fit = Ok((ptr, prev)); - best_fit_size = meta.size; - } - - // Move to the next block. - prev = current; - current = meta.next; - } - - best_fit - } - - /// Calculates the user pointer from the control pointer. - /// - /// `ptr` - The control pointer. - /// - /// Returns the user pointer. - /// - /// # Safety - /// - /// The ptr must be a valid control pointer. Note: After the allocator which allocated the pointer is dropped, the control pointer is always considered invalid. - unsafe fn user_ptr(ptr: NonNull) -> NonNull { - debug_assert!( - (ptr.as_ptr() as usize) - <= isize::MAX as usize - size_of::() - Self::align_up() - ); - unsafe { ptr.byte_add(size_of::() + Self::align_up()) } - } - - /// Calculates the control pointer from the user pointer. - /// - /// `ptr` - The user pointer. - /// - /// Returns the control pointer. - /// - /// # Safety - /// - /// The ptr must be a valid user pointer. Note: After the allocator which allocated the pointer is dropped, the user pointer is always considered invalid. - unsafe fn control_ptr(ptr: NonNull) -> NonNull { - debug_assert!((ptr.as_ptr() as usize) > size_of::() + Self::align_up()); - unsafe { ptr.byte_sub(size_of::() + Self::align_up()) } - } -} - -/// Implementation of the Allocator trait for BestFitAllocator. 
-impl Allocator for BestFitAllocator { - /// Allocates a block of memory with the given size and alignment. Note: This function will always yield an invalid align for align > 128bit. - /// - /// `size` - The size of the block. - /// `align` - The alignment of the block. - /// - /// Returns the user pointer to the block if successful, otherwise an error. - fn malloc(&mut self, size: usize, align: usize) -> Result, utils::KernelError> { - // Check if the alignment is valid. - if align > align_of::() { - return Err(utils::KernelError::InvalidAlign); - } - - // Check if the size is valid. - if size == 0 { - return Err(utils::KernelError::InvalidSize); - } - - // For some cfg this warning is correct. But for others its not. - #[allow(clippy::absurd_extreme_comparisons)] - if size >= MAX_ADDR { - return Err(utils::KernelError::InvalidSize); - } - - // Align the size. - let aligned_size = super::align_up(size); - debug_assert!(aligned_size >= size); - debug_assert!(aligned_size <= isize::MAX as usize); - - // Find the best fit block. - let (split, block, prev) = match self.select_block(aligned_size) { - Ok((block, prev)) => { - // Get the metadata of the block. - let meta = unsafe { block.cast::().as_mut() }; - - // Calculate the amount of bytes until the beginning of the possibly next metadata. - let min = aligned_size.saturating_add(size_of::() + Self::align_up()); - - debug_assert!( - (block.as_ptr() as usize) - <= isize::MAX as usize - - meta.size - - size_of::() - - Self::align_up() - ); - - debug_assert!( - meta.size < isize::MAX as usize - size_of::() - Self::align_up() - ); - - // If the block is big enough to split. Then it also needs to be big enough to store the metadata + align of the next block. - if meta.size > min { - // Calculate the remaining size of the block and thus the next metadata. 
- let remaining_meta = BestFitMeta { - size: meta.size - min, - next: meta.next, - }; - - // Shrink the current block to the requested aligned_size + padding (which is not available to the user). - meta.size = aligned_size; - - // Calculate the pointer to the next metadata. - let ptr = unsafe { Self::user_ptr(block).byte_add(aligned_size) }; - - unsafe { - // Write the new metadata to the memory. - ptr.cast::().write(remaining_meta); - } - - // If there is a previous block, we insert the new block after it. Otherwise we set it as the new head. - if let Some(prev) = prev { - let prev_meta = unsafe { prev.cast::().as_mut() }; - prev_meta.next = Some(ptr); - } else { - self.head = Some(ptr); - } - - // The next block of an allocated block is always None. - meta.next = None; - - (true, block, prev) - } else { - (false, block, prev) - } - } - Err(_) => { - let (block, prev) = self.select_block(size)?; - (false, block, prev) - } - }; - - if !split { - // Get the metadata of the block. - let meta = unsafe { block.cast::().as_mut() }; - - if let Some(prev) = prev { - let prev_meta = unsafe { prev.cast::().as_mut() }; - // If there is a previous block, we remove the current block from the list. Ie. we set the next block of the previous block to the next block of the current block. - prev_meta.next = meta.next; - } else { - // If there is no previous block, we set the next block as the new head. - self.head = meta.next; - } - - // The next block of an allocated block is always None. - meta.next = None; - } - - // Return the user pointer. - Ok(unsafe { Self::user_ptr(block).cast() }) - } - - /// Frees a block of memory. - /// - /// `ptr` - The pointer to the block. - /// `size` - The size of the block. (This is used to check if the size of the block is correct.) 
- unsafe fn free(&mut self, ptr: NonNull, size: usize) { - let block = unsafe { Self::control_ptr(ptr.cast()) }; - let meta = unsafe { block.cast::().as_mut() }; - - // The next block of a free block is always the current head. We essentially insert the block at the beginning of the list. - meta.next = self.head; - - // Check if the size of the block is correct. - BUG_ON!(meta.size != super::align_up(size), "Invalid size in free()"); - - // Set the size of the block. - meta.size = size; - - // Set the block as the new head. - self.head = Some(block); - } -} - -// TESTING ------------------------------------------------------------------------------------------------------------ - -#[cfg(test)] -mod tests { - use super::*; - - fn verify_block(user_ptr: NonNull, size: usize, next: Option>) { - let control_ptr = unsafe { BestFitAllocator::control_ptr(user_ptr) }; - let meta = unsafe { control_ptr.cast::().as_ref() }; - - assert!(meta.size >= size); - assert_eq!(meta.next, next); - } - - fn verify_ptrs_not_overlaping(ptrs: &[(NonNull, usize)]) { - for (i, (ptr1, size1)) in ptrs.iter().enumerate() { - for (j, (ptr2, size2)) in ptrs.iter().enumerate() { - if i == j { - continue; - } - - let begin1 = ptr1.as_ptr() as usize; - let end1 = begin1 + size1; - let begin2 = ptr2.as_ptr() as usize; - let end2 = begin2 + size2; - - assert!(end1 <= begin2 || end2 <= begin1); - assert!(begin1 != begin2); - assert!(end1 != end2); - assert!(*size1 > 0); - assert!(*size2 > 0); - assert!(end1 > begin1); - assert!(end2 > begin2); - } - } - } - - fn alloc_range(length: usize) -> Range { - let alloc_range = std::alloc::Layout::from_size_align(length, align_of::()).unwrap(); - let ptr = unsafe { std::alloc::alloc(alloc_range) }; - ptr as usize..ptr as usize + length - } - - #[test] - fn allocate_one() { - let mut allocator = BestFitAllocator::new(); - - let range = alloc_range(4096); - unsafe { - allocator.add_range(range).unwrap(); - } - - let ptr = allocator.malloc(128, 1).unwrap(); - - 
verify_block(ptr, 128, None); - } - - #[test] - fn alloc_alot() { - let mut allocator = BestFitAllocator::new(); - const CNT: usize = 100; - const SIZE: usize = 128; - - let range = alloc_range(SIZE * CNT * 2); - unsafe { - allocator.add_range(range).unwrap(); - } - - let mut ptrs = Vec::new(); - for _ in 0..CNT { - let ptr = allocator.malloc(SIZE, 1).unwrap(); - verify_block(ptr, SIZE, None); - ptrs.push((ptr, SIZE)); - } - - verify_ptrs_not_overlaping(ptrs.as_slice()); - } - - #[test] - fn alloc_exact() { - let mut allocator = BestFitAllocator::new(); - const CNT: usize = 10; - const SIZE: usize = 128; - - let range = - alloc_range((SIZE + size_of::() + BestFitAllocator::align_up()) * CNT); - unsafe { - allocator.add_range(range).unwrap(); - } - - let mut ptrs = Vec::new(); - for _ in 0..CNT { - let ptr = allocator.malloc(SIZE, 1).unwrap(); - verify_block(ptr, SIZE, None); - ptrs.push((ptr, SIZE)); - } - - verify_ptrs_not_overlaping(ptrs.as_slice()); - } - - #[test] - fn alloc_oom() { - let mut allocator = BestFitAllocator::new(); - const CNT: usize = 10; - const SIZE: usize = 128; - - let range = - alloc_range((SIZE + size_of::() + BestFitAllocator::align_up()) * CNT - 1); - unsafe { - allocator.add_range(range).unwrap(); - } - - let mut ptrs = Vec::new(); - for _ in 0..CNT - 1 { - let ptr = allocator.malloc(SIZE, 1).unwrap(); - verify_block(ptr, SIZE, None); - ptrs.push(ptr); - } - - let ptr = allocator.malloc::(SIZE, 1); - assert!(ptr.is_err_and(|e| e == utils::KernelError::OutOfMemory)); - } - - #[test] - fn alloc_no_oom_through_free() { - let mut allocator = BestFitAllocator::new(); - const SIZE: usize = 128; - - let range = alloc_range(SIZE + size_of::() + BestFitAllocator::align_up()); - unsafe { - allocator.add_range(range).unwrap(); - } - - let ptr = allocator.malloc(SIZE, 1).unwrap(); - verify_block(ptr, SIZE, None); - - unsafe { - allocator.free(ptr, SIZE); - } - - let ptr = allocator.malloc(SIZE, 1).unwrap(); - verify_block(ptr, SIZE, None); - } - - 
#[test] - fn multi_range_alloc() { - let mut allocator = BestFitAllocator::new(); - const CNT: usize = 10; - const SIZE: usize = 128; - - let mut ranges = Vec::new(); - for _ in 0..CNT { - let range = alloc_range(SIZE + size_of::() + BestFitAllocator::align_up()); - unsafe { - allocator.add_range(range.clone()).unwrap(); - } - ranges.push(range); - } - - let mut ptrs = Vec::new(); - for _ in 0..CNT { - let ptr = allocator.malloc(SIZE, 1).unwrap(); - verify_block(ptr, SIZE, None); - ptrs.push((ptr, SIZE)); - } - - verify_ptrs_not_overlaping(ptrs.as_slice()); - } - - #[test] - fn multi_range_no_oom_through_free() { - // This function allocates multiple ranges and then frees one of them randomly. And only then there is no oom. - let mut allocator = BestFitAllocator::new(); - - const CNT: usize = 10; - const SIZE: usize = 128; - - let mut ranges = Vec::new(); - for _ in 0..CNT { - let range = alloc_range(SIZE + size_of::() + BestFitAllocator::align_up()); - unsafe { - allocator.add_range(range.clone()).unwrap(); - } - ranges.push(range); - } - - let mut ptrs = Vec::new(); - - let ptr = allocator.malloc::(SIZE, 1).unwrap(); - - for _ in 0..CNT - 1 { - let ptr = allocator.malloc(SIZE, 1).unwrap(); - verify_block(ptr, SIZE, None); - ptrs.push((ptr, SIZE)); - } - - unsafe { - allocator.free(ptr, SIZE); - } - - let ptr = allocator.malloc(SIZE, 1).unwrap(); - ptrs.push((ptr, SIZE)); - - verify_ptrs_not_overlaping(ptrs.as_slice()); - } - - #[test] - fn multi_range_oom() { - // This function allocates multiple ranges and then frees one of them randomly. And only then there is no oom. 
- let mut allocator = BestFitAllocator::new(); - - const CNT: usize = 10; - const SIZE: usize = 128; - - let mut ranges = Vec::new(); - for _ in 0..CNT { - let range = alloc_range(SIZE + size_of::() + BestFitAllocator::align_up()); - unsafe { - allocator.add_range(range.clone()).unwrap(); - } - ranges.push(range); - } - - let mut ptrs = Vec::new(); - - for _ in 0..CNT { - let ptr = allocator.malloc(SIZE, 1).unwrap(); - verify_block(ptr, SIZE, None); - ptrs.push((ptr, SIZE)); - } - - let ptr = allocator.malloc::(SIZE, 1); - assert!(ptr.is_err_and(|e| e == utils::KernelError::OutOfMemory)); - - verify_ptrs_not_overlaping(ptrs.as_slice()); - } -} - -// END TESTING -------------------------------------------------------------------------------------------------------- - -// VERIFICATION ------------------------------------------------------------------------------------------------------- -#[cfg(kani)] -mod verification { - use super::*; - use core::{alloc::Layout, ptr}; - - fn verify_block(user_ptr: NonNull, size: usize, next: Option>) { - let control_ptr = unsafe { BestFitAllocator::control_ptr(user_ptr) }; - let meta = unsafe { control_ptr.cast::().as_ref() }; - - assert!(meta.size >= size); - assert_eq!(meta.next, next); - } - - fn alloc_range(length: usize) -> Option> { - let alloc_range = std::alloc::Layout::from_size_align(length, align_of::()).unwrap(); - let ptr = unsafe { std::alloc::alloc(alloc_range) }; - - if ptr.is_null() || ((ptr as usize) >= isize::MAX as usize - length) { - None - } else { - Some(ptr as usize..ptr as usize + length) - } - } - - #[kani::proof] - #[kani::unwind(2)] - fn allocate_one() { - let mut allocator = BestFitAllocator::new(); - - let size: usize = kani::any(); - kani::assume(size < MAX_ADDR - size_of::() - BestFitAllocator::align_up()); - kani::assume(size > 0); - let larger_size: usize = kani::any_where(|&x| { - x > size + size_of::() + BestFitAllocator::align_up() && x < MAX_ADDR - }); - - if let Some(range) = 
alloc_range(larger_size) { - unsafe { - assert_eq!(allocator.add_range(range), Ok(())); - } - - let ptr = allocator.malloc(size, 1).unwrap(); - - verify_block(ptr, size, None); - } - } -} -// END VERIFICATION --------------------------------------------------------------------------------------------------- diff --git a/src/mem/alloc/bestfit.rs b/src/mem/alloc/bestfit.rs new file mode 100644 index 0000000..9948e58 --- /dev/null +++ b/src/mem/alloc/bestfit.rs @@ -0,0 +1,764 @@ +use core::{ops::Range, ptr::NonNull}; + +use hal::mem::PhysAddr; + +use crate::error::Result; + +/// The metadata that is before any block in the BestFitAllocator. +struct BestFitMeta { + /// The size of the block in bytes. + size: usize, + /// The pointer to the next free block. This is `None` if the block is allocated. + next: Option>, +} + +/// This is an allocator implementation that uses the best fit strategy. +/// That does mean, when we allocate a block, we try to find the smallest block that fits the requested size. +/// Blocks are stored in a singly linked list. The important part is that the linked list is stored in-line with the memory blocks. +/// This means that every block has a header that contains the size of the block and a pointer to the next block. +#[proc_macros::fmt] +pub struct BestFitAllocator { + /// Head of the free block list. + head: Option>, +} + +/// Implementation of the BestFitAllocator. +impl BestFitAllocator { + pub const MIN_RANGE_SIZE: usize = size_of::() + Self::align_up() + 1; + + /// Creates a new BestFitAllocator. + /// + /// Returns the new BestFitAllocator. + pub const fn new() -> Self { + Self { head: None } + } + + /// Adds a range of memory to the allocator. + /// + /// `range` - The range of memory to add. + /// + /// Returns `Ok(())` if the range was added successfully, otherwise an error. + /// + /// # Safety + /// + /// The range must be valid, 128bit aligned and must not overlapping with any other current or future range. 
+ /// The range must also be at least as large as `MIN_RANGE_SIZE`. + /// Also the range must stay valid, for the whole lifetime of the allocator. Also the lifetime of any allocation is only valid as long as the allocator is valid. + pub unsafe fn add_range(&mut self, range: &Range) -> Result<()> { + let ptr = range.start; + + // Check if the pointer is 128bit aligned. + if !ptr.is_multiple_of(align_of::()) { + return Err(kerr!(InvalidArgument)); + } + + if range.end.diff(range.start) < Self::MIN_RANGE_SIZE { + return Err(kerr!(InvalidArgument)); + } + + debug_assert!(range.end > range.start); + debug_assert!(range.end.diff(range.start) > size_of::() + Self::align_up()); + debug_assert!(range.end.as_usize() <= isize::MAX as usize); + + // The user pointer is the pointer to the user memory. So we need to add the size of the meta data and possibly add padding. + let user_pointer = ptr + size_of::() + Self::align_up(); + + // Set the current head as the next block, so we can add the new block to the head. + let meta = BestFitMeta { + size: range.end.diff(user_pointer), + next: self.head, + }; + + // Write the header to the memory. + unsafe { core::ptr::write(ptr.as_mut_ptr::(), meta) }; + + // Set the head to the new block. + self.head = Some(unsafe { NonNull::new_unchecked(ptr.as_mut_ptr::()) }); + Ok(()) + } + + /// Calculates the padding required to align the block. Note: We only align to 128bit. + /// + /// Returns the padding in bytes. + const fn align_up() -> usize { + let meta = size_of::(); + let align = align_of::(); + // Calculate the padding required to align the block. + (align - (meta % align)) % align + } + + /// Selects the best fit block for the given size. + /// + /// `size` - The size of the block. + /// + /// Returns the control pointer to the block and the control pointer to the previous block. 
+ fn select_block( + &mut self, + size: usize, + requested: Option, + ) -> Result<(NonNull, Option>)> { + let mut best_fit = Err(kerr!(OutOfMemory)); + let mut best_fit_size = usize::MAX; + + let mut current = self.head; + let mut prev = None; + + if let Some(requested) = requested { + while let Some(ptr) = current { + // Get the metadata of the block. + let meta = unsafe { ptr.cast::().as_ref() }; + + if unsafe { Self::contains(meta, requested, size) } { + return Ok((ptr, prev)); + } + + // Move to the next block. + prev = current; + current = meta.next; + } + } + + // Iterate over all blocks and find the best fit. + while let Some(ptr) = current { + // Get the metadata of the block. + let meta = unsafe { ptr.cast::().as_ref() }; + + // Check if the block is big enough and smaller than the current best fit. + if meta.size >= size && meta.size <= best_fit_size { + best_fit = Ok((ptr, prev)); + best_fit_size = meta.size; + } + + // Move to the next block. + prev = current; + current = meta.next; + } + + best_fit + } + + /// Calculates the user pointer from the control pointer. + /// + /// `ptr` - The control pointer. + /// + /// Returns the user pointer. + /// + /// # Safety + /// + /// The ptr must be a valid control pointer. Note: After the allocator which allocated the pointer is dropped, the control pointer is always considered invalid. + unsafe fn user_ptr(ptr: NonNull) -> NonNull { + debug_assert!( + (ptr.as_ptr() as usize) + <= isize::MAX as usize - size_of::() - Self::align_up() + ); + unsafe { ptr.byte_add(size_of::() + Self::align_up()) } + } + + /// Calculates the control pointer from the user pointer. + /// + /// `ptr` - The user pointer. + /// + /// Returns the control pointer. + /// + /// # Safety + /// + /// The ptr must be a valid user pointer. Note: After the allocator which allocated the pointer is dropped, the user pointer is always considered invalid. 
+ unsafe fn control_ptr(ptr: NonNull) -> NonNull { + debug_assert!((ptr.as_ptr() as usize) > size_of::() + Self::align_up()); + unsafe { ptr.byte_sub(size_of::() + Self::align_up()) } + } + + unsafe fn contains(meta: &BestFitMeta, target: PhysAddr, size: usize) -> bool { + let begin = unsafe { + Self::user_ptr(NonNull::new_unchecked( + meta as *const BestFitMeta as *mut u8, + )) + }; + debug_assert!(size > 0); + + if target >= begin.into() { + if let Some(target) = target.checked_add(size) { + if target > (unsafe { begin.add(meta.size) }).into() { + return false; + } + } else { + return false; + } + return true; + } + false + } +} + +/// Implementation of the Allocator trait for BestFitAllocator. +impl super::Allocator for BestFitAllocator { + /// Allocates a block of memory with the given size and alignment. Note: This function will always yield an invalid align for align > 128bit. + /// + /// `size` - The size of the block. + /// `align` - The alignment of the block. + /// + /// Returns the user pointer to the block if successful, otherwise an error. + fn malloc( + &mut self, + size: usize, + align: usize, + request: Option, + ) -> Result> { + // Check if the alignment is valid. + if align == 0 || align > align_of::() { + return Err(kerr!(InvalidAlign)); + } + + if let Some(request) = request { + if !request.is_multiple_of(align) { + return Err(kerr!(InvalidAlign)); + } + } + + // Check if the size is valid. + if size == 0 { + return Err(kerr!(InvalidArgument)); + } + + // For some cfg this warning is correct. But for others its not. + #[allow(clippy::absurd_extreme_comparisons)] + if size >= super::MAX_ADDR { + return Err(kerr!(InvalidArgument)); + } + + // Align the size. + let aligned_size = super::super::align_up(size); + debug_assert!(aligned_size >= size); + debug_assert!(aligned_size <= isize::MAX as usize); + + // Find the best fit block. 
+ let (split, block, prev) = match self.select_block(aligned_size, request) { + Ok((block, prev)) => { + // Get the metadata of the block. + let meta = unsafe { block.cast::().as_mut() }; + + // If we requested a specific address. The size must be extended by the offset from block start to the requested address. + let aligned_size = if let Some(request) = request { + aligned_size + request.diff(unsafe { Self::user_ptr(block) }.into()) + } else { + aligned_size + }; + + // Calculate the amount of bytes until the beginning of the possibly next metadata. + let min = aligned_size.saturating_add(size_of::() + Self::align_up()); + + debug_assert!( + (block.as_ptr() as usize) + <= isize::MAX as usize + - meta.size + - size_of::() + - Self::align_up() + ); + + debug_assert!( + meta.size < isize::MAX as usize - size_of::() - Self::align_up() + ); + + // If the block is big enough to split. Then it also needs to be big enough to store the metadata + align of the next block. + if meta.size > min { + // Calculate the remaining size of the block and thus the next metadata. + let remaining_meta = BestFitMeta { + size: meta.size - min, + next: meta.next, + }; + + // Shrink the current block to the requested aligned_size + padding (which is not available to the user). + meta.size = aligned_size; + + // Calculate the pointer to the next metadata. + let ptr = unsafe { Self::user_ptr(block).byte_add(aligned_size) }; + + unsafe { + // Write the new metadata to the memory. + ptr.cast::().write(remaining_meta); + } + + // If there is a previous block, we insert the new block after it. Otherwise we set it as the new head. + if let Some(prev) = prev { + let prev_meta = unsafe { prev.cast::().as_mut() }; + prev_meta.next = Some(ptr); + } else { + self.head = Some(ptr); + } + + // The next block of an allocated block is always None. 
+ meta.next = None; + + (true, block, prev) + } else { + (false, block, prev) + } + } + Err(_) => { + let (block, prev) = self.select_block(size, request)?; + (false, block, prev) + } + }; + + if !split { + // Get the metadata of the block. + let meta = unsafe { block.cast::().as_mut() }; + + if let Some(prev) = prev { + let prev_meta = unsafe { prev.cast::().as_mut() }; + // If there is a previous block, we remove the current block from the list. Ie. we set the next block of the previous block to the next block of the current block. + prev_meta.next = meta.next; + } else { + // If there is no previous block, we set the next block as the new head. + self.head = meta.next; + } + + // The next block of an allocated block is always None. + meta.next = None; + } + + if let Some(request) = request { + debug_assert!(unsafe { + Self::contains(block.cast::().as_ref(), request, size) + }); + } + + // Return the user pointer. + Ok(unsafe { Self::user_ptr(block).cast() }) + } + + /// Frees a block of memory. + /// + /// `ptr` - The pointer to the block. + /// `size` - The size of the block. (This is used to check if the size of the block is correct.) + unsafe fn free(&mut self, ptr: NonNull, size: usize) { + let block = unsafe { Self::control_ptr(ptr.cast()) }; + let meta = unsafe { block.cast::().as_mut() }; + + // The next block of a free block is always the current head. We essentially insert the block at the beginning of the list. + meta.next = self.head; + + // Check if the size of the block is correct. + bug_on!( + meta.size != super::super::align_up(size), + "Invalid size in free()" + ); + + // Set the size of the block (must stay aligned, matching what malloc stored). + meta.size = super::super::align_up(size); + + // Set the block as the new head. 
+ self.head = Some(block); + } +} + +// TESTING ------------------------------------------------------------------------------------------------------------ + +#[cfg(test)] +mod tests { + use crate::error::Kind; + use crate::mem::align_up; + + use super::super::*; + use super::*; + + fn verify_block(user_ptr: NonNull, size: usize, next: Option>) { + let control_ptr = unsafe { BestFitAllocator::control_ptr(user_ptr) }; + let meta = unsafe { control_ptr.cast::().as_ref() }; + + assert!(meta.size >= size); + assert_eq!(meta.next, next); + } + + fn verify_ptrs_not_overlaping(ptrs: &[(NonNull, usize)]) { + for (i, (ptr1, size1)) in ptrs.iter().enumerate() { + for (j, (ptr2, size2)) in ptrs.iter().enumerate() { + if i == j { + continue; + } + + let begin1 = ptr1.as_ptr() as usize; + let end1 = begin1 + size1; + let begin2 = ptr2.as_ptr() as usize; + let end2 = begin2 + size2; + + assert!(end1 <= begin2 || end2 <= begin1); + assert!(begin1 != begin2); + assert!(end1 != end2); + assert!(*size1 > 0); + assert!(*size2 > 0); + assert!(end1 > begin1); + assert!(end2 > begin2); + } + } + } + + fn alloc_range(length: usize) -> Range { + let alloc_range = std::alloc::Layout::from_size_align(length, align_of::()).unwrap(); + let ptr = unsafe { std::alloc::alloc(alloc_range) }; + PhysAddr::new(ptr as usize)..PhysAddr::new(ptr as usize + length) + } + + #[test] + fn allocate_one() { + let mut allocator = BestFitAllocator::new(); + + let range = alloc_range(4096); + unsafe { + allocator.add_range(&range).unwrap(); + } + + let ptr = allocator.malloc(128, 1, None).unwrap(); + + verify_block(ptr, 128, None); + } + + #[test] + fn alloc_request() { + let mut allocator = BestFitAllocator::new(); + + let range = alloc_range(4096); + unsafe { + allocator.add_range(&range).unwrap(); + } + + let request = range.start + 128; + let ptr = allocator.malloc::(128, 1, Some(request)).unwrap(); + + // Check that the returned pointer contains the requested address. 
+ let meta = unsafe { + BestFitAllocator::control_ptr(ptr) + .cast::() + .as_ref() + }; + assert!(unsafe { BestFitAllocator::contains(meta, request, 128) }); + } + + #[test] + fn alloc_request_to_big() { + let mut allocator = BestFitAllocator::new(); + + let range = alloc_range(4096); + unsafe { + allocator.add_range(&range).unwrap(); + } + + let request = range.start + 4096; + let ptr = allocator.malloc::(128, 1, Some(request)); + + assert!(ptr.is_err_and(|e| e.kind == Kind::OutOfMemory)); + } + + #[test] + fn alloc_request_not_aligned() { + let mut allocator = BestFitAllocator::new(); + + let range = alloc_range(4096); + unsafe { + allocator.add_range(&range).unwrap(); + } + + let request = range.start + 127; + let ptr = allocator.malloc::(128, 8, Some(request)); + + assert!(ptr.is_err_and(|e| e.kind == Kind::InvalidAlign)); + } + + #[test] + fn alloc_request_not_available() { + let mut allocator = BestFitAllocator::new(); + + let range = alloc_range(4096); + unsafe { + allocator.add_range(&range).unwrap(); + } + + let request = range.start + 128; + let ptr = allocator.malloc::(128, 1, Some(request)).unwrap(); + verify_block(ptr, 128, None); + + let ptr = allocator.malloc::(128, 1, Some(request)); + assert!(ptr.is_err_and(|e| e.kind == Kind::OutOfMemory)); + } + + #[test] + fn alloc_request_out_of_range() { + let mut allocator = BestFitAllocator::new(); + + let range = alloc_range(4096); + unsafe { + allocator.add_range(&range).unwrap(); + } + + let request = range.end + 128; + let ptr = allocator.malloc::(128, 1, Some(request)); + + assert!(ptr.is_err_and(|e| e.kind == Kind::OutOfMemory)); + } + + #[test] + fn alloc_alot() { + let mut allocator = BestFitAllocator::new(); + const CNT: usize = 100; + const SIZE: usize = 128; + + let range = alloc_range(SIZE * CNT * 2); + unsafe { + allocator.add_range(&range).unwrap(); + } + + let mut ptrs = Vec::new(); + for _ in 0..CNT { + let ptr = allocator.malloc(SIZE, 1, None).unwrap(); + verify_block(ptr, SIZE, None); + 
ptrs.push((ptr, SIZE)); + } + + verify_ptrs_not_overlaping(ptrs.as_slice()); + } + + #[test] + fn alloc_exact() { + let mut allocator = BestFitAllocator::new(); + const CNT: usize = 10; + const SIZE: usize = 128; + + let range = + alloc_range((SIZE + size_of::() + BestFitAllocator::align_up()) * CNT); + unsafe { + allocator.add_range(&range).unwrap(); + } + + let mut ptrs = Vec::new(); + for _ in 0..CNT { + let ptr = allocator.malloc(SIZE, 1, None).unwrap(); + verify_block(ptr, SIZE, None); + ptrs.push((ptr, SIZE)); + } + + verify_ptrs_not_overlaping(ptrs.as_slice()); + } + + #[test] + fn alloc_oom() { + let mut allocator = BestFitAllocator::new(); + const CNT: usize = 10; + const SIZE: usize = 128; + + let range = + alloc_range((SIZE + size_of::() + BestFitAllocator::align_up()) * CNT - 1); + unsafe { + allocator.add_range(&range).unwrap(); + } + + let mut ptrs = Vec::new(); + for _ in 0..CNT - 1 { + let ptr = allocator.malloc(SIZE, 1, None).unwrap(); + verify_block(ptr, SIZE, None); + ptrs.push(ptr); + } + + let ptr = allocator.malloc::(SIZE, 1, None); + assert!(ptr.is_err_and(|e| e.kind == Kind::OutOfMemory)); + } + + #[test] + fn alloc_no_oom_through_free() { + let mut allocator = BestFitAllocator::new(); + const SIZE: usize = 128; + + let range = alloc_range(SIZE + size_of::() + BestFitAllocator::align_up()); + unsafe { + allocator.add_range(&range).unwrap(); + } + + let ptr = allocator.malloc(SIZE, 1, None).unwrap(); + verify_block(ptr, SIZE, None); + + unsafe { + allocator.free(ptr, SIZE); + } + + let ptr = allocator.malloc(SIZE, 1, None).unwrap(); + verify_block(ptr, SIZE, None); + } + + #[test] + fn multi_range_alloc() { + let mut allocator = BestFitAllocator::new(); + const CNT: usize = 10; + const SIZE: usize = 128; + + let mut ranges = Vec::new(); + for _ in 0..CNT { + let range = alloc_range(SIZE + size_of::() + BestFitAllocator::align_up()); + unsafe { + allocator.add_range(&range).unwrap(); + } + ranges.push(range); + } + + let mut ptrs = Vec::new(); 
+ for _ in 0..CNT { + let ptr = allocator.malloc(SIZE, 1, None).unwrap(); + verify_block(ptr, SIZE, None); + ptrs.push((ptr, SIZE)); + } + + verify_ptrs_not_overlaping(ptrs.as_slice()); + } + + #[test] + fn multi_range_no_oom_through_free() { + // This function allocates multiple ranges and then frees one of them randomly. And only then there is no oom. + let mut allocator = BestFitAllocator::new(); + + const CNT: usize = 10; + const SIZE: usize = 128; + + let mut ranges = Vec::new(); + for _ in 0..CNT { + let range = alloc_range(SIZE + size_of::() + BestFitAllocator::align_up()); + unsafe { + allocator.add_range(&range).unwrap(); + } + ranges.push(range); + } + + let mut ptrs = Vec::new(); + + let ptr = allocator.malloc::(SIZE, 1, None).unwrap(); + + for _ in 0..CNT - 1 { + let ptr = allocator.malloc(SIZE, 1, None).unwrap(); + verify_block(ptr, SIZE, None); + ptrs.push((ptr, SIZE)); + } + + unsafe { + allocator.free(ptr, SIZE); + } + + let ptr = allocator.malloc(SIZE, 1, None).unwrap(); + ptrs.push((ptr, SIZE)); + + verify_ptrs_not_overlaping(ptrs.as_slice()); + } + + #[test] + fn free_corrupts_metadata() { + let mut allocator = BestFitAllocator::new(); + // Use a size NOT 16-byte aligned so align_up(size) > size. + const SIZE: usize = 17; + const ALIGNED: usize = 32; // align_up(17) on 64-bit: (17+15)&!15 = 32 + assert!(align_up(SIZE) == ALIGNED); + + // Allocate just enough space for one block. + let range = alloc_range(ALIGNED + size_of::() + BestFitAllocator::align_up()); + unsafe { + allocator.add_range(&range).unwrap(); + } + + // First alloc: meta.size = align_up(17) = 32. + let ptr1: core::ptr::NonNull = allocator.malloc(SIZE, 1, None).unwrap(); + + // First free: meta.size set to 17 (BUG: should stay 32). + unsafe { + allocator.free(ptr1, SIZE); + } + + // Second alloc: select_block(32) fails (meta.size=17 < 32), fallback select_block(17) + // succeeds and returns the block with meta.size still = 17. 
+ let ptr2: core::ptr::NonNull = allocator + .malloc(SIZE, 1, None) + .expect("second malloc should succeed via fallback path"); + + // Second free: bug_on!(meta.size(17) != align_up(17)(32)) → panics. + unsafe { + allocator.free(ptr2, SIZE); + } + } + + #[test] + fn multi_range_oom() { + // This function allocates multiple ranges and then frees one of them randomly. And only then there is no oom. + let mut allocator = BestFitAllocator::new(); + + const CNT: usize = 10; + const SIZE: usize = 128; + + let mut ranges = Vec::new(); + for _ in 0..CNT { + let range = alloc_range(SIZE + size_of::() + BestFitAllocator::align_up()); + unsafe { + allocator.add_range(&range).unwrap(); + } + ranges.push(range); + } + + let mut ptrs = Vec::new(); + + for _ in 0..CNT { + let ptr = allocator.malloc(SIZE, 1, None).unwrap(); + verify_block(ptr, SIZE, None); + ptrs.push((ptr, SIZE)); + } + + let ptr = allocator.malloc::(SIZE, 1, None); + assert!(ptr.is_err_and(|e| e.kind == Kind::OutOfMemory)); + + verify_ptrs_not_overlaping(ptrs.as_slice()); + } +} + +// END TESTING -------------------------------------------------------------------------------------------------------- + +// VERIFICATION ------------------------------------------------------------------------------------------------------- +#[cfg(kani)] +mod verification { + use super::super::*; + use super::*; + + fn verify_block(user_ptr: NonNull, size: usize, next: Option>) { + let control_ptr = unsafe { BestFitAllocator::control_ptr(user_ptr) }; + let meta = unsafe { control_ptr.cast::().as_ref() }; + + assert!(meta.size >= size); + assert_eq!(meta.next, next); + } + + fn alloc_range(length: usize) -> Option> { + let alloc_range = std::alloc::Layout::from_size_align(length, align_of::()).unwrap(); + let ptr = unsafe { std::alloc::alloc(alloc_range) }; + + if ptr.is_null() || ((ptr as usize) >= isize::MAX as usize - length) { + None + } else { + Some(PhysAddr::new(ptr as usize)..PhysAddr::new(ptr as usize + length)) + } + } + + 
#[kani::proof] + #[kani::unwind(2)] + fn allocate_one() { + let mut allocator = BestFitAllocator::new(); + + let size: usize = kani::any(); + kani::assume(size < MAX_ADDR - size_of::() - BestFitAllocator::align_up()); + kani::assume(size > 0); + let larger_size: usize = kani::any_where(|&x| { + x > size + size_of::() + BestFitAllocator::align_up() && x < MAX_ADDR + }); + + if let Some(range) = alloc_range(larger_size) { + unsafe { + assert!(matches!(allocator.add_range(&range), Ok(()))); + } + + let ptr = allocator.malloc(size, 1, None).unwrap(); + + verify_block(ptr, size, None); + } + } +} +// END VERIFICATION --------------------------------------------------------------------------------------------------- diff --git a/src/mem/heap.rs b/src/mem/heap.rs deleted file mode 100644 index b3b0af3..0000000 --- a/src/mem/heap.rs +++ /dev/null @@ -1,107 +0,0 @@ -//! This module provides a binary heap implementation. - -use super::array::Vec; -use crate::utils::KernelError; - -/// An array-based binary heap, with N elements stored inline. -#[derive(Debug)] -pub struct BinaryHeap { - vec: Vec, -} - -impl BinaryHeap { - /// Create a new empty binary heap. - pub const fn new() -> Self { - Self { vec: Vec::new() } - } - - /// Push a value onto the binary heap. - /// - /// `value` - The value to push onto the binary heap. - /// - /// Returns `Ok(())` if the value was pushed onto the binary heap, or an error if the heap cannot be extended (e.g. OOM). - pub fn push(&mut self, value: T) -> Result<(), KernelError> { - self.vec.push(value)?; - self.sift_up(self.len() - 1); - Ok(()) - } - - /// Pop the smallest value from the binary heap. - /// - /// Returns the smallest value in the binary heap, or `None` if the heap is empty. 
- pub fn pop(&mut self) -> Option { - if self.is_empty() { - return None; - } - - let value = self.peek().cloned(); - self.vec.swap(0, self.len() - 1); - self.vec.pop(); - self.sift_down(0); - value - } - - /// Sift the value at the given index up the binary heap. - /// - /// `index` - The index of the value to sift up. - fn sift_up(&mut self, mut index: usize) { - // We move up the heap until we reach the root or the parent is smaller than the current value. - while index > 0 { - let parent = (index - 1) / 2; - if self.vec.at(parent) <= self.vec.at(index) { - break; - } - self.vec.swap(parent, index); - index = parent; - } - } - - /// Sift the value at the given index down the binary heap. - /// - /// `index` - The index of the value to sift down. - fn sift_down(&mut self, mut index: usize) { - // We move down the heap until we reach a leaf or the value is smaller than both children. - while index < self.len() { - let left = 2 * index + 1; - let right = 2 * index + 2; - let mut smallest = index; - - if left < self.len() && self.vec.at(left) < self.vec.at(smallest) { - smallest = left; - } - - if right < self.len() && self.vec.at(right) < self.vec.at(smallest) { - smallest = right; - } - - if smallest == index { - break; - } - - self.vec.swap(smallest, index); - index = smallest; - } - } - - /// Check if the binary heap is empty. - /// - /// Returns `true` if the binary heap is empty, `false` otherwise. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Peek at the smallest value in the binary heap. - /// - /// Returns the smallest value in the binary heap, or `None` if the heap is empty. - pub fn peek(&self) -> Option<&T> { - if self.is_empty() { - return None; - } - self.vec.at(0) - } - - /// Get the number of elements in the binary heap. 
- pub fn len(&self) -> usize { - self.vec.len() - } -} diff --git a/src/mem/pfa.rs b/src/mem/pfa.rs new file mode 100644 index 0000000..e20f212 --- /dev/null +++ b/src/mem/pfa.rs @@ -0,0 +1,58 @@ +// The top level page frame allocator. + +use hal::mem::PhysAddr; + +use crate::error::Result; +use crate::sync::spinlock::SpinLocked; +use crate::types::boxed::Box; + +use core::pin::Pin; + +mod bitset; + +/// Page size constant (typically 4KB) +pub const PAGE_SIZE: usize = 4096; + +const PAGE_CNT: usize = 100; // TODO: This should be determined by the DeviceTree. + +type AllocatorType = bitset::Allocator; + +static PFA: SpinLocked>>> = SpinLocked::new(None); + +/// This trait abstracts over different page frame allocator implementations. +trait Allocator { + /// Returns an initializer function that can be used to create an instance of the allocator. + /// The initializer function takes a physical address and the amount of pages needed. + /// + /// Safety: + /// + /// - The returned function must only be called with a useable and valid physical address. + fn initializer() -> unsafe fn(PhysAddr, usize) -> Result>>; + + fn alloc(&mut self, page_count: usize) -> Option; + fn free(&mut self, addr: PhysAddr, page_count: usize); +} + +pub fn init_pfa(addr: PhysAddr) -> Result<()> { + let mut pfa = PFA.lock(); + if pfa.is_some() { + return Err(kerr!(InvalidArgument)); + } + + let initializer = AllocatorType::initializer(); + *pfa = Some(unsafe { initializer(addr, PAGE_CNT)? 
}); + + Ok(()) +} + +pub fn alloc_page(page_count: usize) -> Option { + let mut pfa = PFA.lock(); + pfa.as_mut()?.alloc(page_count) +} + +pub fn free_page(addr: PhysAddr, page_count: usize) { + let mut pfa = PFA.lock(); + if let Some(pfa) = pfa.as_mut() { + pfa.free(addr, page_count); + } +} \ No newline at end of file diff --git a/src/mem/pfa/bitset.rs b/src/mem/pfa/bitset.rs new file mode 100644 index 0000000..16137e6 --- /dev/null +++ b/src/mem/pfa/bitset.rs @@ -0,0 +1,260 @@ +use core::pin::Pin; +use core::ptr::NonNull; + +use hal::mem::PhysAddr; + +use crate::{ + error::Result, types::boxed::{self, Box} +}; + +pub struct Allocator { + begin: PhysAddr, + l1: [usize; N], +} + +impl Allocator { + const BITS_PER_WORD: usize = usize::BITS as usize; + + pub fn new(begin: PhysAddr) -> Option { + if !begin.is_multiple_of(super::PAGE_SIZE) { + return None; + } + + if begin > PhysAddr::MAX - (N * super::PAGE_SIZE * usize::BITS as usize) { + return None; + } + + Some(Self { + begin, + l1: [!0; N], // All bits are set to 1, meaning all pages are free. + }) + } +} + +impl super::Allocator for Allocator { + fn initializer() -> unsafe fn(PhysAddr, usize) -> Result>> { + |addr: PhysAddr, pcnt: usize| -> Result>> { + if pcnt > N { + todo!("Runtime page frame allocator for more than {} pages", N) + } + + if !addr.is_multiple_of(core::mem::align_of::()) { + return Err(kerr!(InvalidArgument)); + } + + let ptr = NonNull::new(addr.as_mut_ptr::()).ok_or(kerr!(InvalidArgument))?; + // Align this up to PAGE_SIZE + let begin = addr + size_of::(); + let begin = if begin.is_multiple_of(super::PAGE_SIZE) { + begin + } else { + PhysAddr::new((begin.as_usize() + super::PAGE_SIZE - 1) & !(super::PAGE_SIZE - 1)) + }; + // TODO: Subtract the needed pages from the available + unsafe { core::ptr::write(ptr.as_ptr(), Self::new(begin).ok_or(kerr!(InvalidArgument))?) }; + + // Safety: Ptr is properly aligned and non-null. The validity of the memory at that address is valid by the call contract. 
+ Ok(Pin::new(unsafe { boxed::Box::from_raw(ptr) })) + } + } + + fn alloc(&mut self, page_count: usize) -> Option { + // If a bit is 1 the page is free. If a bit is 0 the page is allocated. + let mut start = 0; + let mut len = 0usize; + + let rem = page_count.saturating_sub(Self::BITS_PER_WORD); + let mask = (!0usize).unbounded_shl((Self::BITS_PER_WORD.saturating_sub(page_count)) as u32); + + for idx in 0..N { + if self.l1[idx] == 0 { + len = 0; + continue; + } + + let mut byte = self.l1[idx]; + + let mut shift = if len > 0 { + 0usize + } else { + byte.leading_zeros() as usize + }; + + byte <<= shift; + + while shift < Self::BITS_PER_WORD { + // Make the mask smaller if we already have some contiguous bits. + let mask = if rem.saturating_sub(len) == 0 { + mask << (len - rem) + } else { + mask + }; + + // We shifted byte to MSB, mask is already aligned to the left. + // We compare them via and and shift to the right to shift out extra bits from the mask that would overflow into the next word. + let mut found = (byte & mask) >> shift; + + // We also need to shift the mask to the right so that we can compare mask and found. + if found == (mask >> shift) { + if len == 0 { + start = idx * Self::BITS_PER_WORD + shift; + } + + // Shift completely to the right. + found >>= found.trailing_zeros(); + + // As all found bits are now on the right we can just count them to get the amount we found. + len += found.trailing_ones() as usize; + // Continue to the next word if we haven't found enough bits yet. + break; + } else { + len = 0; + } + + shift += 1; + byte <<= 1; + } + + if len >= page_count { + // Mark the allocated pages as used. + let mut idx = start / Self::BITS_PER_WORD; + + // Mark all bits in the first word as used. 
+ { + let skip = start % Self::BITS_PER_WORD; + let rem = len.min(Self::BITS_PER_WORD) - skip; + + self.l1[idx] &= !((!0usize).unbounded_shl((Self::BITS_PER_WORD - rem) as u32) >> skip); + + if len <= rem { + return Some(self.begin + (start * super::PAGE_SIZE)); + } + + len -= rem; + idx += 1; + } + + // Mark all bits in the middle words as used. + { + let mid_cnt = len / Self::BITS_PER_WORD; + + for i in 0..mid_cnt { + self.l1[idx + i] = 0; + } + + idx += mid_cnt; + } + + // Mark the remaining bits in the last word as used. + self.l1[idx] &= !((!0usize).unbounded_shl((Self::BITS_PER_WORD - (len % Self::BITS_PER_WORD)) as u32)); + return Some(self.begin + (start * super::PAGE_SIZE)); + } + } + + None + } + + fn free(&mut self, addr: PhysAddr, page_count: usize) { + if !addr.is_multiple_of(super::PAGE_SIZE) { + panic!("Address must be page aligned"); + } + + let mut idx = (addr.as_usize() - self.begin.as_usize()) / super::PAGE_SIZE / Self::BITS_PER_WORD; + let mut bit_idx = ((addr.as_usize() - self.begin.as_usize()) / super::PAGE_SIZE) % Self::BITS_PER_WORD; + + // TODO: slow + for _ in 0..page_count { + self.l1[idx] |= 1 << (Self::BITS_PER_WORD - 1 - bit_idx); + + bit_idx += 1; + + if bit_idx == Self::BITS_PER_WORD { + bit_idx = 0; + idx += 1; + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn last_bit_underflow() { + // Only the last page in word 0 is free + let mut allocator = Allocator::<1>::new(PhysAddr::new(0)).unwrap(); + allocator.l1[0] = 1; + + let result = super::super::Allocator::alloc(&mut allocator, 1); + + assert!(result.is_some()); + } + + #[test] + fn test_random_pattern() { + const ITERATIONS: usize = 1000; + + for _ in 0..ITERATIONS { + const N: usize = 1024; + const BITS: usize = Allocator::::BITS_PER_WORD; + const ALLOC_SIZE: usize = 100; + + let mut allocator = Allocator::::new(PhysAddr::new(0x0)).unwrap(); + + // Generate a random bit pattern. 
+ for i in 0..N { + let is_zero = rand::random::(); + + if is_zero { + allocator.l1[i / BITS] &= !(1 << ((BITS - 1) - (i % BITS))); + } + } + + // Place a run of ALLOC_SIZE contiguous bits set to 1 at a random position. + let start = rand::random::() % (N - ALLOC_SIZE); + for i in start..(start + ALLOC_SIZE) { + allocator.l1[i / BITS] |= 1 << ((BITS - 1) - (i % BITS)); + } + + let pre = allocator.l1.clone(); + + let addr = super::super::Allocator::alloc(&mut allocator, ALLOC_SIZE).unwrap(); + let idx = addr.as_usize() / super::super::PAGE_SIZE; + + // Check that the bits in returned addresses is all ones in pre. + for i in 0..ALLOC_SIZE { + let bit = (pre[(idx + i) / BITS] >> ((BITS - 1) - ((idx + i) % BITS))) & 1; + assert_eq!(bit, 1, "Bit at index {} is not set", idx + i); + } + + // Check that the bits in returned addresses is all zeros in allocator.l1. + for i in 0..ALLOC_SIZE { + let bit = (allocator.l1[(idx + i) / BITS] >> ((BITS - 1) - ((idx + i) % BITS))) & 1; + assert_eq!(bit, 0, "Bit at index {} is not cleared", idx + i); + } + } + } +} + +#[cfg(kani)] +mod verification { + use super::*; + use hal::mem::PhysAddr; + + #[kani::proof] + #[kani::unwind(70)] + fn verify_alloc_no_rem_underflow_single_word() { + let mut allocator = Allocator::<1>::new(PhysAddr::new(0)).unwrap(); + + let l1_0: usize = kani::any(); + + allocator.l1[0] = l1_0; + + let page_count: usize = kani::any(); + + kani::assume(page_count >= 1 && page_count <= 64); + + let _ = super::super::Allocator::alloc(&mut allocator, page_count); + } +} diff --git a/src/mem/vmm.rs b/src/mem/vmm.rs new file mode 100644 index 0000000..6ddc7e9 --- /dev/null +++ b/src/mem/vmm.rs @@ -0,0 +1,73 @@ +use hal::mem::{PhysAddr, VirtAddr}; + +use crate::error::Result; + +mod nommu; + +pub type AddressSpace = nommu::AddressSpace; + +bitflags::bitflags! 
{ + #[derive(Clone, Copy)] + pub struct Perms: u8 { + const Read = 0b0001; + const Write = 0b0010; + const Exec = 0b0100; + } +} + +#[derive(Clone)] +pub enum Backing { + Zeroed, + Uninit, + Anon(PhysAddr), +} + +#[derive(Clone)] +pub struct Region { + start: Option, + len: usize, + backing: Backing, + perms: Perms, +} + +impl Region { + /// Creates a new region. + /// + /// - `start` is the starting virtual address of the region. If `None`, the system will choose a suitable address. + /// - `len` is the length of the region in bytes. + /// - `backing` is the backing type of the region, which determines how the region is initialized and where its contents come from. + /// - `perms` is the permissions of the region, which determines how the region can be accessed. + /// + pub fn new(start: Option, len: usize, backing: Backing, perms: Perms) -> Self { + Self { + start, + len, + backing, + perms, + } + } + + pub fn start(&self) -> VirtAddr { + self.start.unwrap_or_else(|| VirtAddr::new(0)) + } + + pub fn len(&self) -> usize { + self.len + } + + pub fn contains(&self, addr: VirtAddr) -> bool { + self.start().saturating_add(self.len()) > addr && addr >= self.start() + } +} + +pub trait AddressSpacelike { + // Size is the amount of pages in the address space. On nommu systems this will be reserved. 
+ fn new(pages: usize) -> Result where Self: Sized; + fn map(&mut self, region: Region) -> Result; + fn unmap(&mut self, region: &Region) -> Result<()>; + fn protect(&mut self, region: &Region, perms: Perms) -> Result<()>; + fn virt_to_phys(&self, addr: VirtAddr) -> Option; + fn phys_to_virt(&self, addr: PhysAddr) -> Option; + fn end(&self) -> VirtAddr; + fn activate(&self) -> Result<()>; +} \ No newline at end of file diff --git a/src/mem/vmm/nommu.rs b/src/mem/vmm/nommu.rs new file mode 100644 index 0000000..f169c14 --- /dev/null +++ b/src/mem/vmm/nommu.rs @@ -0,0 +1,81 @@ +use core::ptr::copy_nonoverlapping; + +use hal::mem::{PhysAddr, VirtAddr}; + +use crate::{ + error::Result, mem::{ + alloc::{Allocator, bestfit}, + pfa, vmm, + } +}; + +pub struct AddressSpace { + begin: PhysAddr, + end: PhysAddr, + allocator: bestfit::BestFitAllocator, +} + +impl vmm::AddressSpacelike for AddressSpace { + fn new(pgs: usize) -> Result { + let begin = pfa::alloc_page(pgs).ok_or(kerr!(OutOfMemory))?; + let end = begin + .checked_add(pgs * pfa::PAGE_SIZE) + .ok_or(kerr!(OutOfMemory))?; + + let mut allocator = bestfit::BestFitAllocator::new(); + unsafe { allocator.add_range(&(begin..end))? 
}; + + Ok(Self { + begin, + end, + allocator, + }) + } + + fn map(&mut self, region: vmm::Region) -> Result { + let req = region.start.and_then(|virt| self.virt_to_phys(virt)); + // TODO: per page align + let align = core::mem::align_of::(); + let start = self.allocator.malloc::(region.len(), align, req)?; + + match region.backing { + vmm::Backing::Anon(phys) => { + unsafe { + copy_nonoverlapping(phys.as_mut_ptr::(), start.as_ptr(), region.len()) + }; + } + vmm::Backing::Zeroed => { + unsafe { core::ptr::write_bytes(start.as_ptr(), 0, region.len()) }; + } + vmm::Backing::Uninit => {} + } + + Ok(start.into()) + } + + fn unmap(&mut self, _region: &vmm::Region) -> Result<()> { + Ok(()) + } + + fn protect(&mut self, _region: &vmm::Region, _perms: vmm::Perms) -> Result<()> { + Ok(()) + } + + fn phys_to_virt(&self, addr: PhysAddr) -> Option { + addr.checked_sub(self.begin.as_usize()) + .map(|phys| VirtAddr::new(phys.as_usize())) + } + + fn virt_to_phys(&self, addr: VirtAddr) -> Option { + self.begin.checked_add(addr.as_usize()) + } + + fn end(&self) -> VirtAddr { + // This should always succeed. + self.phys_to_virt(self.end).unwrap() + } + + fn activate(&self) -> Result<()> { + Ok(()) + } +} diff --git a/src/sched.rs b/src/sched.rs index 70ae413..4e5f130 100644 --- a/src/sched.rs +++ b/src/sched.rs @@ -1,56 +1,401 @@ //! This module provides access to the scheduler. -pub mod scheduler; +mod dispch; +pub mod rr; +pub mod rt; pub mod task; pub mod thread; +use core::{ + ffi::c_void, + sync::atomic::{AtomicBool, Ordering}, +}; + use hal::Schedable; -use crate::utils::KernelError; +use crate::{ + error::Result, + mem, + sched::thread::Waiter, + sync::{self, atomic::AtomicU64, spinlock::SpinLocked}, + time::{self}, + types::{ + array::IndexMap, + rbtree::RbTree, + traits::{Get, GetMut}, + view::ViewMut, + }, +}; -/// Reschedule the tasks. 
-pub fn reschedule() { - hal::Machine::trigger_reschedule(); -} +type ThreadMap = IndexMap; +type TaskMap = IndexMap; + +type GlobalScheduler = Scheduler<32>; + +static SCHED: SpinLocked = SpinLocked::new(GlobalScheduler::new()); + +static DISABLED: AtomicBool = AtomicBool::new(true); +static NEXT_TICK: AtomicU64 = AtomicU64::new(0); + +type WaiterView<'a, const N: usize> = ViewMut<'a, thread::UId, thread::Waiter, ThreadMap>; + +pub struct Scheduler { + threads: ThreadMap, + tasks: TaskMap, + id_gen: usize, + + rt_scheduler: rt::Scheduler, + rr_scheduler: rr::Scheduler, -/// Create a new task. -/// -/// `desc` - The task descriptor. -/// `main_desc` - The main thread descriptor. -/// `main_timing` - The timing information for the main thread. -/// -/// Returns the task ID if the task was created successfully, or an error if the task could not be created. -pub fn create_task(desc: task::TaskDescriptor) -> Result { - enable_scheduler(false); - let res = scheduler::SCHEDULER.lock().create_task(desc); - enable_scheduler(true); + wakeup: RbTree, - res + current: Option, + last_tick: u64, } -pub fn create_thread( - task_id: task::TaskId, - entry: extern "C" fn(), - fin: Option !>, - timing: thread::Timing, -) -> Result { - enable_scheduler(false); - let res = scheduler::SCHEDULER - .lock() - .create_thread(entry, fin, timing, task_id); - enable_scheduler(true); +impl Scheduler { + pub const fn new() -> Self { + Self { + threads: IndexMap::new(), + tasks: IndexMap::new(), + id_gen: 1, + rt_scheduler: rt::Scheduler::new(), + rr_scheduler: rr::Scheduler::new(), + wakeup: RbTree::new(), + current: None, + last_tick: 0, + } + } + + fn land(&mut self, ctx: *mut c_void) { + if let Some(current) = self.current { + let mut kill = None; + if let Some(thread) = self.threads.get_mut(current) { + if thread.save_ctx(ctx).is_err() { + warn!( + "failed to save context (SP: {:x}) of thread {}.", + ctx as usize, current + ); + kill = Some(thread.task_id()); + } + } else { + bug!("failed 
to land thread {}. Does not exist.", current); + } + + if let Some(task_id) = kill { + self.dequeue(current); + self.current = None; + if self.kill_task(task_id).is_err() { + // Should not be possible. The thread exists, so the task must exist. + bug!("failed to kill task {}", task_id); + } + } + } + } + + /// Triggers a reschedule at *latest* when we hit timepoint `next`. + fn next_resched(now: u64, next: u64) { + let old = NEXT_TICK.load(Ordering::Acquire); + + if old > now && old <= next { + return; + } + + NEXT_TICK.store(next, Ordering::Release); + } + + pub fn enqueue(&mut self, now: u64, uid: thread::UId) -> Result<()> { + let thread = self.threads.get(uid).ok_or(kerr!(InvalidArgument))?; + + if thread.rt_server().is_some() { + let mut view = rt::ServerView::::new(&mut self.threads); + self.rt_scheduler.enqueue(uid, now, &mut view); + } else { + if self.rr_scheduler.enqueue(uid, &mut self.threads).is_err() { + // This should not be possible. + // - Thread is in the thread list. + // - Thread is not linked into a different list. + bug!("failed to enqueue thread {} into RR scheduler.", uid); + } + } + reschedule(); + Ok(()) + } + + fn do_wakeups(&mut self, now: u64) { + while let Some(uid) = self.wakeup.min() { + let mut done = false; + WaiterView::::with(&mut self.threads, |view| { + if let Some(waiter) = view.get(uid) { + if waiter.until() > now { + Self::next_resched(now, waiter.until()); + done = true; + return; + } + + if let Err(_) = self.wakeup.remove(uid, view) { + bug!("failed to remove thread {} from wakeup tree.", uid); + } + } else { + bug!("failed to get thread {} from wakeup tree.", uid); + } + }); + + if done { + break; + } + + if self.enqueue(now, uid).is_err() { + bug!("failed to enqueue thread {} after wakeup.", uid); + } + } + } + + /// Syncs the new state after the last do_sched call to the scheduler, and returns whether we need to immediately reschedule. 
+ fn sync_to_sched(&mut self, now: u64) -> bool { + let dt = now - self.last_tick; + self.last_tick = now; + + if let Some(old) = self.current { + let throttle = rt::ServerView::::with(&mut self.threads, |view| { + self.rt_scheduler.put(old, dt, view) + }); + + if let Some(throttle) = throttle { + self.sleep_until(throttle, now); + return true; + } + + self.rr_scheduler.put(old, dt as u32); + } + + self.do_wakeups(now); + false + } + + fn select_next(&mut self) -> (thread::UId, u32) { + rt::ServerView::::with(&mut self.threads, |view| self.rt_scheduler.pick(view)) + .or_else(|| self.rr_scheduler.pick(&mut self.threads)) + .unwrap_or((thread::IDLE_THREAD, 1000)) + } + + pub fn do_sched(&mut self, now: u64) -> Option<(*mut c_void, &mut task::Task)> { + // Sync the new state to the scheduler. + if self.sync_to_sched(now) { + // Trigger reschedule after interrupts are enabled. + return None; + } + + // Pick the next thread to run. + let (new, budget) = self.select_next(); + + // At this point, the task/thread must exist. Everything else is a bug. + let Some(thread) = self.threads.get(new) else { + bug!("failed to pick thread {}. Does not exist.", new); + }; + let (ctx, task_id) = (thread.ctx(), thread.task_id()); + + let Some(task) = self.tasks.get_mut(task_id) else { + bug!("failed to get task {}. Does not exist.", task_id); + }; + + // We don't need to resched if the thread has budget. + self.current = Some(new); + Self::next_resched(now, now.saturating_add(budget as u64)); + Some((ctx, task)) + } + + pub fn sleep_until(&mut self, until: u64, now: u64) -> Result<()> { + if until <= now { + return Ok(()); + } + let uid = self.current.ok_or(kerr!(InvalidArgument))?; - res + if let Some(thread) = self.threads.get_mut(uid) { + thread.set_waiter(Some(Waiter::new(until, uid))); + } else { + // This should not be possible. The thread must exist since it's the current thread. + bug!( + "failed to put current thread {} to sleep. 
Does not exist.", + uid + ); + } + + if self + .wakeup + .insert(uid, &mut WaiterView::<N>::new(&mut self.threads)) + .is_err() + { + // This should not be possible. The thread exists. + bug!("failed to insert thread {} into wakeup tree.", uid); + } + + self.dequeue(uid); + reschedule(); + Ok(()) + } + + pub fn kick(&mut self, uid: thread::UId) -> Result<()> { + WaiterView::<N>::with(&mut self.threads, |view| { + self.wakeup.remove(uid, view)?; + let thread = view.get_mut(uid).unwrap_or_else(|| { + bug!("failed to get thread {} from wakeup tree.", uid); + }); + thread.set_until(0); + self.wakeup.insert(uid, view).unwrap_or_else(|_| { + bug!("failed to re-insert thread {} into wakeup tree.", uid); + }); + Ok(()) + }) + } + + pub fn dequeue(&mut self, uid: thread::UId) { + rt::ServerView::<N>::with(&mut self.threads, |view| { + self.rt_scheduler.dequeue(uid, view); + }); + self.rr_scheduler.dequeue(uid, &mut self.threads); + } + + pub fn create_task(&mut self, task: &task::Attributes) -> Result<task::UId> { + let uid = task::UId::new(self.id_gen).ok_or(kerr!(InvalidArgument))?; + self.id_gen += 1; + + self.tasks.insert(&uid, task::Task::new(uid, task)?)?; + Ok(uid) + } + + pub fn kill_task(&mut self, uid: task::UId) -> Result<()> { + let task = self.tasks.get_mut(uid).ok_or(kerr!(InvalidArgument))?; + + while let Some(id) = task.threads().head() { + // Borrow checker... + rt::ServerView::<N>::with(&mut self.threads, |view| { + self.rt_scheduler.dequeue(id, view); + }); + self.rr_scheduler.dequeue(id, &mut self.threads); + self.wakeup + .remove(id, &mut WaiterView::<N>::new(&mut self.threads)); + + if task.threads_mut().remove(id, &mut self.threads).is_err() { + // This should not be possible. The thread ID is from the thread list of the task, so it must exist. + bug!("failed to remove thread {} from task {}.", id, uid); + } + + if self.threads.remove(&id).is_none() { + // This should not be possible. The thread ID is from the thread list of the task, so it must exist.
+ bug!("failed to remove thread {} from thread list.", id); + } + + if Some(id) == self.current { + self.current = None; + reschedule(); + } + } + + self.tasks.remove(&uid).ok_or(kerr!(InvalidArgument))?; + Ok(()) + } + + pub fn create_thread( + &mut self, + task: Option<task::UId>, + attrs: &thread::Attributes, + ) -> Result<thread::UId> { + let task = match task { + Some(t) => t, + None => self.current.ok_or(kerr!(InvalidArgument))?.owner(), + }; + let task = self.tasks.get_mut(task).ok_or(kerr!(InvalidArgument))?; + let uid = task.create_thread(self.id_gen, attrs, &mut self.threads)?; + + self.id_gen += 1; + Ok(uid) + } + + pub fn kill_thread(&mut self, uid: Option<thread::UId>) -> Result<()> { + let uid = uid.unwrap_or(self.current.ok_or(kerr!(InvalidArgument))?); + self.dequeue(uid); + self.wakeup + .remove(uid, &mut WaiterView::<N>::new(&mut self.threads)); + + self.tasks + .get_mut(uid.tid().owner()) + .ok_or(kerr!(InvalidArgument))? + .threads_mut() + .remove(uid, &mut self.threads)?; + + self.threads.remove(&uid).ok_or(kerr!(InvalidArgument))?; + + if Some(uid) == self.current { + self.current = None; + reschedule(); + } + Ok(()) + } +} + +pub fn with<T, F: FnOnce(&mut Scheduler) -> T>(f: F) -> T { + sync::atomic::irq_free(|| { + let mut sched = SCHED.lock(); + f(&mut sched) + }) } -pub fn enable_scheduler(enable: bool) { - scheduler::set_enabled(enable); +pub fn init(kaddr_space: mem::vmm::AddressSpace) { + with(|sched| { + let uid = task::KERNEL_TASK; + if let Ok(task) = task::Task::from_addr_space(uid, kaddr_space) { + if sched.tasks.insert(&uid, task).is_err() { + panic!("failed to create kernel task."); + } + } else { + panic!("failed to create kernel address space."); + } + }) } -pub fn tick_scheduler() -> bool { - if !scheduler::enabled() { +pub fn needs_reschedule(now: u64) -> bool { + if DISABLED.load(Ordering::Acquire) { return false; } - scheduler::SCHEDULER.lock().tick() + now >= NEXT_TICK.load(Ordering::Acquire) +} + +#[inline] +pub fn disable() { + DISABLED.store(true, Ordering::Release); +} + +#[inline] +pub fn
enable() { + DISABLED.store(false, Ordering::Release); +} + +/// Reschedule the tasks. +pub fn reschedule() { + if DISABLED.load(Ordering::Acquire) { + return; + } + + hal::Machine::trigger_reschedule(); +} + +/// cbindgen:ignore +/// cbindgen:no-export +#[unsafe(no_mangle)] +pub extern "C" fn sched_enter(mut ctx: *mut c_void) -> *mut c_void { + with(|sched| { + let old = sched.current.map(|c| c.owner()); + sched.land(ctx); + + if let Some((new, task)) = sched.do_sched(time::tick()) { + if old != Some(task.id) { + dispch::prepare(task); + } + ctx = new; + } + + ctx + }) } diff --git a/src/sched/dispch.rs b/src/sched/dispch.rs new file mode 100644 index 0000000..0f781b6 --- /dev/null +++ b/src/sched/dispch.rs @@ -0,0 +1,7 @@ +use super::task::Task; + +pub fn prepare(task: &mut Task) { + if task.id.is_kernel() { + // Change task priv. level in HAL. + } +} \ No newline at end of file diff --git a/src/sched/rr.rs b/src/sched/rr.rs new file mode 100644 index 0000000..1e966c9 --- /dev/null +++ b/src/sched/rr.rs @@ -0,0 +1,57 @@ +use crate::{ + error::Result, sched::thread::{self}, types::list::List +}; + +pub struct Scheduler { + queue: List, + + current: Option, + current_left: u32, + quantum: u32, +} + +impl Scheduler { + pub const fn new() -> Self { + // TODO: Make quantum configurable. 
+ Self { queue: List::new(), current: None, current_left: 0, quantum: 1000 } + } + + pub fn enqueue(&mut self, uid: thread::UId, storage: &mut super::ThreadMap) -> Result<()> { + self.queue.push_back(uid, storage).map_err(|_| kerr!(InvalidArgument)) + } + + pub fn put(&mut self, uid: thread::UId, dt: u32) { + if let Some(current) = self.current { + if current == uid { + self.current_left = self.current_left.saturating_sub(dt); + } + } + } + + pub fn pick(&mut self, storage: &mut super::ThreadMap) -> Option<(thread::UId, u32)> { + match self.current { + Some(current) if self.current_left > 0 => return Some((current, self.current_left)), + Some(current) => { + self.queue.pop_front(storage); + self.queue.push_back(current, storage); + + self.current = self.queue.head(); + self.current_left = self.quantum; + } + None => { + self.current = self.queue.head(); + self.current_left = self.quantum; + } + } + + self.current.map(|id| (id, self.current_left)) + } + + pub fn dequeue(&mut self, uid: thread::UId, storage: &mut super::ThreadMap) { + self.queue.remove(uid, storage); + + if self.current == Some(uid) { + self.current = None; + } + } +} diff --git a/src/sched/rt.rs b/src/sched/rt.rs new file mode 100644 index 0000000..33799a6 --- /dev/null +++ b/src/sched/rt.rs @@ -0,0 +1,45 @@ +use crate::{types::{rbtree::RbTree, traits::{Get, GetMut}, view::ViewMut}, sched::{ThreadMap, thread::{self}}}; + +pub struct Scheduler { + edf: RbTree, +} + +pub type ServerView<'a, const N: usize> = ViewMut<'a, thread::UId, thread::RtServer, ThreadMap>; + +impl Scheduler { + pub const fn new() -> Self { + Self { + edf: RbTree::new(), + } + } + + pub fn enqueue(&mut self, uid: thread::UId, now: u64, storage: &mut ServerView) { + if let Some(server) = storage.get_mut(uid) { + // Threads are only enqueued when they are runnable. + server.on_wakeup(now); + self.edf.insert(uid, storage); + } + } + + /// This should be called on each do_schedule call, to update the internal scheduler state. 
+ /// If this function returns Some(u64) it means the current thread has exhausted its budget and should be throttled until the returned timestamp. + pub fn put(&mut self, uid: thread::UId, dt: u64, storage: &mut ServerView) -> Option { + if Some(uid) == self.edf.min() { + if let Some(server) = storage.get_mut(uid) { + return server.consume(dt); + } else { + bug!("thread {} not found in storage", uid); + } + } + + None + } + + pub fn pick(&mut self, storage: &mut ServerView) -> Option<(thread::UId, u32)> { + self.edf.min().and_then(|id| storage.get(id).map(|s| (id, s.budget()))) + } + + pub fn dequeue(&mut self, uid: thread::UId, storage: &mut ServerView) { + self.edf.remove(uid, storage); + } +} \ No newline at end of file diff --git a/src/sched/scheduler.rs b/src/sched/scheduler.rs deleted file mode 100644 index aded20b..0000000 --- a/src/sched/scheduler.rs +++ /dev/null @@ -1,206 +0,0 @@ -//! The scheduler module is responsible for managing the tasks and threads in the system. -//! It provides the necessary functions to create tasks and threads, and to switch between them. - -use core::{ffi::c_void, sync::atomic::AtomicBool}; - -use super::task::{Task, TaskId}; -use crate::{ - mem::{self, array::IndexMap, heap::BinaryHeap, queue::Queue}, - sched::{ - task::TaskDescriptor, - thread::{RunState, ThreadMap, ThreadUId, Timing}, - }, - sync::spinlock::SpinLocked, - utils, -}; - -/// The global scheduler instance. -pub static SCHEDULER: SpinLocked = SpinLocked::new(Scheduler::new()); -static SCHEDULER_ENABLED: AtomicBool = AtomicBool::new(false); - -/// The scheduler struct. It keeps track of the tasks and threads in the system. -/// This scheduler is a simple Rate Monotonic Scheduler (RMS) implementation. -#[derive(Debug)] -pub struct Scheduler { - /// The current running thread. - current: Option, - /// Fast interval store. This gets updated every time a new thread is selected. - current_interval: usize, - /// Stores the tasks in the system. 
- user_tasks: IndexMap, - /// Stores the threads in the system. - threads: ThreadMap<8>, - /// The priority queue that yields the next thread to run. - queue: BinaryHeap<(usize, ThreadUId), 32>, - /// The callbacks queue that stores the threads that need to be fired in the future. - callbacks: Queue<(ThreadUId, usize), 32>, - /// The progression of the time interval of the scheduler. - time: usize, -} - -impl Scheduler { - /// Create a new scheduler instance. - pub const fn new() -> Self { - Self { - current: None, - current_interval: 0, - user_tasks: IndexMap::new(), - threads: ThreadMap::new(), - queue: BinaryHeap::new(), - callbacks: Queue::new(), - time: 0, - } - } - - pub fn create_task(&mut self, desc: TaskDescriptor) -> Result { - let size = mem::align_up(desc.mem_size); - let idx = self - .user_tasks - .find_empty() - .ok_or(utils::KernelError::OutOfMemory)?; - let task_id = TaskId::new_user(idx); - - let task = Task::new(size, task_id)?; - self.user_tasks.insert(&idx, task)?; - Ok(task_id) - } - - pub fn create_thread( - &mut self, - entry: extern "C" fn(), - fin: Option !>, - timing: Timing, - task_id: TaskId, - ) -> Result { - let task_idx: usize = task_id.into(); - - if let Some(task) = self.user_tasks.get_mut(&task_idx) { - let desc = task.create_thread(entry, fin, timing)?; - let id = self.threads.create(desc)?; - self.queue.push((timing.period, id))?; - Ok(id) - } else { - Err(utils::KernelError::InvalidArgument) - } - } - - /// Updates the current thread context with the given context. - /// - /// `ctx` - The new context to update the current thread with. - fn update_current_ctx(&mut self, ctx: *mut c_void) { - if let Some(id) = self.current - && let Some(thread) = self.threads.get_mut(&id) - { - thread - .update_sp(ctx) - .expect("Failed to update thread context"); - } - } - - /// Selects a new thread to run, sets the previous thread as ready, and sets the new thread as runs. 
- /// The old thread will be added to the queue to be fired in the next period. - /// The new thread will be selected based on the priority queue. - /// - /// Returns the context of the new thread to run, or `None` if no thread is available. - fn select_new_thread(&mut self) -> Option<*mut c_void> { - if let Some(id) = self.queue.pop().map(|(_, id)| id) { - // Set the previous thread as ready. And add a callback from now. - if let Some(id) = self.current - && let Some(thread) = self.threads.get_mut(&id) - { - thread.update_run_state(RunState::Ready); - // The delay that is already in the queue. - let delay = self.callbacks.back().map(|(_, delay)| *delay).unwrap_or(0); - // Check if the period is already passed. - if thread.timing().period > (self.time + delay) { - // Add the callback to the queue. If it fails, we can't do much. - let _ = self - .callbacks - .push_back((id, thread.timing().period - (self.time + delay))); - } else { - // If the period is already passed, add it to the queue immediately. - let _ = self.queue.push((thread.timing().exec_time, id)); - } - } - - if let Some(thread) = self.threads.get_mut(&id) { - thread.update_run_state(RunState::Runs); - - // Set the new thread as the current one. - self.current_interval = thread.timing().exec_time; - self.current = Some(id); - - // Return the new thread context. - return Some(thread.sp()); - } - } - - None - } - - /// Fires the thread if necessary. - /// - /// Returns `true` if a thread was fired, otherwise `false`. - fn fire_thread_if_necessary(&mut self) -> bool { - let mut found = false; - while let Some((id, cnt)) = self.callbacks.front().cloned() { - // If the delay is 0, we can fire the thread. 
- if cnt - 1 == 0 { - self.callbacks.pop_front(); - if let Some(thread) = self.threads.get_mut(&id) { - thread.update_run_state(RunState::Ready); - - let _ = self.queue.push((thread.timing().exec_time, id)); - found = true; - } - } else { - // If the delay is not 0, we need to update the delay and reinsert it. - let _ = self.callbacks.insert(0, (id, cnt - 1)); - break; - } - } - - found - } - - /// Ticks the scheduler. This function is called every time the system timer ticks. - pub fn tick(&mut self) -> bool { - self.time += 1; - - // If a thread was fired, we need to reschedule. - if self.fire_thread_if_necessary() { - return true; - } - - // If the current thread is done, we need to reschedule. - if self.time >= self.current_interval { - self.time = 0; - return true; - } - - false - } -} - -pub fn enabled() -> bool { - SCHEDULER_ENABLED.load(core::sync::atomic::Ordering::Acquire) -} - -pub fn set_enabled(enabled: bool) { - SCHEDULER_ENABLED.store(enabled, core::sync::atomic::Ordering::Release); -} - -/// cbindgen:ignore -/// cbindgen:no-export -#[unsafe(no_mangle)] -pub extern "C" fn sched_enter(ctx: *mut c_void) -> *mut c_void { - { - let mut scheduler = SCHEDULER.lock(); - - // Update the current context. - scheduler.update_current_ctx(ctx); - - // Select a new thread to run, if available. - scheduler.select_new_thread().unwrap_or(ctx) - } -} diff --git a/src/sched/task.rs b/src/sched/task.rs index 7f6deb8..3c74835 100644 --- a/src/sched/task.rs +++ b/src/sched/task.rs @@ -1,189 +1,144 @@ //! This module provides the basic task and thread structures for the scheduler. 
+use core::borrow::Borrow; +use core::fmt::Display; use core::num::NonZero; -use core::ops::Range; -use core::ptr::NonNull; +use envparse::parse_env; use hal::Stack; use hal::stack::Stacklike; -use crate::mem; +use crate::error::Result; +use crate::sched::{ThreadMap, thread}; +use crate::types::list; +use crate::{mem, sched}; -use crate::mem::alloc::{Allocator, BestFitAllocator}; -use crate::sched::thread::{ThreadDescriptor, ThreadId, Timing}; -use crate::utils::KernelError; +use crate::mem::vmm::AddressSpacelike; +use crate::types::traits::ToIndex; + +pub struct Defaults { + pub stack_pages: usize, +} + +const DEFAULTS: Defaults = Defaults { + stack_pages: parse_env!("OSIRIS_STACKPAGES" as usize), +}; + +pub const KERNEL_TASK: UId = UId { uid: 0 }; /// Id of a task. This is unique across all tasks. -#[repr(u16)] -#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord)] -pub enum TaskId { - // Task with normal user privileges in user mode. - User(usize), - // Task with kernel privileges in user mode. - Kernel(usize), +#[proc_macros::fmt] +#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord)] +pub struct UId { + uid: usize, } -#[allow(dead_code)] -impl TaskId { - /// Check if the task is a user task. - pub fn is_user(&self) -> bool { - matches!(self, TaskId::User(_)) +impl UId { + pub fn new(uid: usize) -> Option { + if uid == 0 { None } else { Some(Self { uid }) } } - /// Check if the task is a kernel task. 
pub fn is_kernel(&self) -> bool { - matches!(self, TaskId::Kernel(_)) - } - - pub fn new_user(id: usize) -> Self { - TaskId::User(id) + self.uid == 0 } +} - pub fn new_kernel(id: usize) -> Self { - TaskId::Kernel(id) +impl ToIndex for UId { + fn to_index>(idx: Option) -> usize { + idx.as_ref().map_or(0, |uid| uid.borrow().uid) } } -impl From for usize { - fn from(val: TaskId) -> Self { - match val { - TaskId::User(id) => id, - TaskId::Kernel(id) => id, - } +impl Display for UId { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.uid) } } -/// Descibes a task. -pub struct TaskDescriptor { - /// The size of the memory that the task requires. - pub mem_size: usize, +pub struct Attributes { + pub resrv_pgs: Option>, } /// The struct representing a task. -#[derive(Debug)] pub struct Task { /// The unique identifier of the task. - pub id: TaskId, - /// The memory of the task. - memory: TaskMemory, + pub id: UId, /// The counter for the thread ids. tid_cntr: usize, - /// The threads associated with the task. - threads: mem::array::Vec, + /// Sets up the memory for the task. + address_space: mem::vmm::AddressSpace, + /// The threads belonging to this task. + threads: list::List, } impl Task { - /// Create a new task. - /// - /// `memory_size` - The size of the memory that the task requires. - /// - /// Returns a new task if the task was created successfully, or an error if the task could not be created. - pub fn new(memory_size: usize, id: TaskId) -> Result { - let memory = TaskMemory::new(memory_size)?; - let threads = mem::array::Vec::new(); + pub fn new(id: UId, attrs: &Attributes) -> Result { + // TODO: On MMU systems, the resrv_pgs attribute will be ignored, as memory will not be reserved. 
+ let resrv_pgs = attrs.resrv_pgs.ok_or(kerr!(InvalidArgument))?; + let address_space = mem::vmm::AddressSpace::new(resrv_pgs.get())?; + Self::from_addr_space(id, address_space) + } + pub fn from_addr_space(id: UId, address_space: mem::vmm::AddressSpace) -> Result { Ok(Self { id, - memory, + address_space, tid_cntr: 0, - threads, + threads: list::List::new(), }) } - fn allocate_tid(&mut self) -> ThreadId { + fn allocate_tid(&mut self) -> sched::thread::Id { let tid = self.tid_cntr; self.tid_cntr += 1; - ThreadId::new(tid, self.id) + sched::thread::Id::new(tid, self.id) + } + + fn allocate_stack(&mut self, attrs: &thread::Attributes) -> Result { + let size = DEFAULTS.stack_pages * mem::pfa::PAGE_SIZE; + let region = mem::vmm::Region::new( + None, + size, + mem::vmm::Backing::Uninit, + mem::vmm::Perms::Read | mem::vmm::Perms::Write, + ); + let pa = self.address_space.map(region)?; + + Ok(hal::stack::Descriptor { + top: pa + size, + size: NonZero::new(size).unwrap(), + entry: attrs.entry, + fin: attrs.fin, + }) } - pub fn create_thread( + pub fn create_thread( &mut self, - entry: extern "C" fn(), - fin: Option !>, - timing: Timing, - ) -> Result { - // Safe unwrap because stack size is non zero. 
- // TODO: Make this configurable - let stack_size = NonZero::new(4096usize).unwrap(); - // TODO: Revert if error occurs - let stack_mem = self.memory.malloc(stack_size.into(), align_of::())?; - let stack_top = unsafe { stack_mem.byte_add(stack_size.get()) }; - - let stack = hal::stack::StackDescriptor { - top: stack_top, - size: stack_size, - entry, - fin, - }; + uid: usize, + attrs: &thread::Attributes, + storage: &mut ThreadMap, + ) -> Result { + let stack = self.allocate_stack(attrs)?; let stack = unsafe { Stack::new(stack) }?; - let tid = self.allocate_tid(); + let new = sched::thread::Thread::new(tid.get_uid(uid), stack, attrs.attrs); + storage.insert(&tid.get_uid(uid), new)?; + self.threads.push_back(tid.get_uid(uid), storage)?; - // TODO: Revert if error occurs - self.register_thread(tid)?; - - Ok(ThreadDescriptor { tid, stack, timing }) + Ok(tid.get_uid(uid)) } - /// Register a thread with the task. - /// - /// `thread_id` - The id of the thread to register. - /// - /// Returns `Ok(())` if the thread was registered successfully, or an error if the thread could not be registered. TODO: Check if the thread is using the same memory as the task. - fn register_thread(&mut self, thread_id: ThreadId) -> Result<(), KernelError> { - self.threads.push(thread_id) + pub fn tid_cntr(&self) -> usize { + self.tid_cntr } -} - -/// The memory of a task. -#[derive(Debug)] -pub struct TaskMemory { - /// The beginning of the memory. - begin: NonNull, - /// The size of the memory. - size: usize, - - /// The allocator for the task's memory. - alloc: BestFitAllocator, -} -#[allow(dead_code)] -impl TaskMemory { - /// Create a new task memory. - /// - /// `size` - The size of the memory. - /// - /// Returns a new task memory if the memory was created successfully, or an error if the memory could not be created. 
- pub fn new(size: usize) -> Result { - let begin = mem::malloc(size, align_of::()).ok_or(KernelError::OutOfMemory)?; - - let mut alloc = BestFitAllocator::new(); - let range = Range { - start: begin.as_ptr() as usize, - end: begin.as_ptr() as usize + size, - }; - - if let Err(e) = unsafe { alloc.add_range(range) } { - unsafe { mem::free(begin, size) }; - return Err(e); - } - - Ok(Self { begin, size, alloc }) + pub fn threads_mut(&mut self) -> &mut list::List { + &mut self.threads } - pub fn malloc(&mut self, size: usize, align: usize) -> Result, KernelError> { - self.alloc.malloc(size, align) - } - - pub fn free(&mut self, ptr: NonNull, size: usize) { - unsafe { self.alloc.free(ptr, size) } - } -} - -impl Drop for TaskMemory { - fn drop(&mut self) { - unsafe { mem::free(self.begin, self.size) }; + pub fn threads(&self) -> &list::List { + &self.threads } } diff --git a/src/sched/thread.rs b/src/sched/thread.rs index 647d5b9..b5ad71f 100644 --- a/src/sched/thread.rs +++ b/src/sched/thread.rs @@ -1,22 +1,40 @@ // ----------------------------------- Identifiers ----------------------------------- +use core::fmt::Display; use core::{borrow::Borrow, ffi::c_void}; -use hal::Stack; -use hal::stack::Stacklike; - -use crate::{mem::array::IndexMap, sched::task::TaskId, utils::KernelError}; +use hal::stack::{FinFn, Stacklike}; +use hal::{Stack, stack::EntryFn}; +use proc_macros::TaggedLinks; + +use crate::error::Result; +use crate::sched::task::{self, KERNEL_TASK}; +use crate::types::list; +use crate::types::{ + rbtree::{self, Compare}, + traits::{Project, ToIndex}, +}; +use crate::uapi; + +pub const IDLE_THREAD: UId = UId { + uid: 1, + tid: Id { + id: 0, + owner: KERNEL_TASK, + }, +}; /// Id of a task. This is only unique within a Task. 
-#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Eq, Ord)] -pub struct ThreadId { +#[proc_macros::fmt] +#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord)] +pub struct Id { id: usize, - owner: TaskId, + owner: task::UId, } #[allow(dead_code)] -impl ThreadId { - pub fn new(id: usize, owner: TaskId) -> Self { +impl Id { + pub fn new(id: usize, owner: task::UId) -> Self { Self { id, owner } } @@ -24,86 +42,84 @@ impl ThreadId { self.id } - pub fn owner(&self) -> TaskId { + pub fn owner(&self) -> task::UId { self.owner } - pub fn get_uid(&self, uid: usize) -> ThreadUId { - ThreadUId { uid, tid: *self } + pub fn get_uid(&self, uid: usize) -> UId { + UId { uid, tid: *self } } } /// Unique identifier for a thread. Build from TaskId and ThreadId. -#[derive(Clone, Copy, Debug)] +#[proc_macros::fmt] +#[derive(Clone, Copy)] #[allow(dead_code)] -pub struct ThreadUId { +pub struct UId { + /// A globally unique identifier for the thread. uid: usize, - tid: ThreadId, + /// The task-local identifier for the thread. 
+ tid: Id, } #[allow(dead_code)] -impl ThreadUId { - pub fn tid(&self) -> ThreadId { +impl UId { + pub fn tid(&self) -> Id { self.tid } + + pub fn as_usize(&self) -> usize { + self.uid + } + + pub fn owner(&self) -> task::UId { + self.tid.owner() + } } -impl PartialEq for ThreadUId { +impl PartialEq for UId { fn eq(&self, other: &Self) -> bool { self.uid == other.uid } } -impl Eq for ThreadUId {} +impl Eq for UId {} -impl Borrow for ThreadUId { - fn borrow(&self) -> &usize { - &self.uid +impl Into for UId { + fn into(self) -> usize { + self.uid } } -impl Default for ThreadUId { - fn default() -> Self { - Self { - uid: 0, - tid: ThreadId::new(0, TaskId::User(0)), - } - } -} - -impl PartialOrd for ThreadUId { +impl PartialOrd for UId { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for ThreadUId { +impl Ord for UId { fn cmp(&self, other: &Self) -> core::cmp::Ordering { self.uid.cmp(&other.uid) } } -// ------------------------------------------------------------------------- - -pub struct ThreadDescriptor { - pub tid: ThreadId, - pub stack: Stack, - pub timing: Timing, +impl ToIndex for UId { + fn to_index>(idx: Option) -> usize { + idx.as_ref().map_or(0, |k| k.borrow().uid) + } } -/// The timing information for a thread. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct Timing { - /// The period of the thread after which it should run again. - pub period: usize, - /// The deadline of the thread. - pub deadline: usize, - /// The execution time of the thread. (How much cpu time it needs) - pub exec_time: usize, +impl Display for UId { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}-{}", self.tid.owner(), self.tid.as_usize()) + } } +// ------------------------------------------------------------------------- + /// The state of a thread. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[proc_macros::fmt] +#[derive(Clone, Copy, PartialEq, Eq)] #[allow(dead_code)] pub enum RunState { /// The thread is currently using the cpu. @@ -114,22 +130,176 @@ pub enum RunState { Waits, } -#[derive(Debug)] -pub struct ThreadState { +#[proc_macros::fmt] +#[derive(Clone, Copy)] +pub struct State { run_state: RunState, stack: Stack, } +#[proc_macros::fmt] +#[derive(Clone, Copy, TaggedLinks)] +pub struct RtServer { + budget: u32, + budget_left: u32, + period: u32, + deadline: u64, + + // Back-reference to the thread uid. + uid: UId, + + /// Real-time tree links for the server. + #[rbtree(tag = RtTree, idx = UId)] + _rt_links: rbtree::Links, +} + +impl RtServer { + pub fn new(budget: u32, period: u32, deadline: u64, uid: UId) -> Self { + Self { + budget, + budget_left: budget, + period, + deadline, + uid, + _rt_links: rbtree::Links::new(), + } + } + + pub fn budget_left(&self) -> u32 { + self.budget_left + } + + pub fn budget(&self) -> u32 { + self.budget + } + + fn violates_sched(&self, now: u64) -> bool { + self.budget_left as u64 * self.period as u64 + > self.budget as u64 * (self.deadline.saturating_sub(now)) + } + + pub fn on_wakeup(&mut self, now: u64) { + if self.deadline <= now || self.violates_sched(now) { + self.deadline = now + self.period as u64; + self.budget_left = self.budget; + } + } + + pub fn replenish(&mut self) { + self.deadline = self.deadline + self.period as u64; + self.budget_left += self.budget; + } + + pub fn consume(&mut self, dt: u64) -> Option { + self.budget_left = self.budget_left.saturating_sub(dt as u32); + + if self.budget_left == 0 { + return Some(self.deadline); + } + + None + } + + pub fn deadline(&self) -> u64 { + self.deadline + } + + pub fn uid(&self) -> UId { + self.uid + } +} + +impl Compare for RtServer { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + let ord = self.deadline.cmp(&other.deadline); + + if ord == core::cmp::Ordering::Equal { + self.uid.cmp(&other.uid) 
+ } else { + ord + } + } +} + +#[proc_macros::fmt] +#[derive(Clone, Copy, TaggedLinks)] +pub struct Waiter { + /// The time when the Thread will be awakened. + until: u64, + + // Back-reference to the thread uid. + uid: UId, + /// Wakup tree links for the thread. + #[rbtree(tag = WakupTree, idx = UId)] + _wakeup_links: rbtree::Links, +} + +impl Waiter { + pub fn new(until: u64, uid: UId) -> Self { + Self { + until, + uid, + _wakeup_links: rbtree::Links::new(), + } + } + + pub fn until(&self) -> u64 { + self.until + } + + pub fn set_until(&mut self, until: u64) { + self.until = until; + } +} + +impl Compare for Waiter { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + match self.until.cmp(&other.until) { + core::cmp::Ordering::Equal => self.uid.cmp(&other.uid), + ord => ord, + } + } +} + +#[proc_macros::fmt] +#[derive(Clone, Copy)] +pub struct WakupTree; +#[proc_macros::fmt] +#[derive(Clone, Copy)] +pub struct RtTree; + +#[proc_macros::fmt] +#[derive(Clone, Copy)] +pub struct RRList; + +#[proc_macros::fmt] +#[derive(Clone, Copy)] +pub struct ThreadList; + +pub struct Attributes { + pub entry: EntryFn, + pub fin: Option, + pub attrs: Option, +} + /// The struct representing a thread. -#[derive(Debug)] -#[allow(dead_code)] +#[proc_macros::fmt] +#[derive(Clone, Copy, TaggedLinks)] pub struct Thread { /// The current state of the thread. - state: ThreadState, - /// The timing constraints of the thread. - timing: Timing, + state: State, /// The unique identifier of the thread. - tuid: ThreadUId, + uid: UId, + /// If the thread is real-time, its contains a constant bandwidth server. + rt_server: Option, + + waiter: Option, + + #[list(tag = RRList, idx = UId)] + rr_links: list::Links, + + #[list(tag = ThreadList, idx = UId)] + thread_links: list::Links, } #[allow(dead_code)] @@ -137,74 +307,104 @@ impl Thread { /// Create a new thread. /// /// `stack` - The stack of the thread. - /// `timing` - The timing constraints of the thread. /// /// Returns a new thread. 
- fn new(tuid: ThreadUId, stack: Stack, timing: Timing) -> Self { + pub fn new(uid: UId, stack: Stack, rtattrs: Option) -> Self { + let server = + rtattrs.map(|attrs| RtServer::new(attrs.budget, attrs.period, attrs.deadline, uid)); Self { - state: ThreadState { + state: State { run_state: RunState::Ready, stack, }, - timing, - tuid, + uid, + rt_server: server, + waiter: None, + rr_links: list::Links::new(), + thread_links: list::Links::new(), } } - pub fn update_sp(&mut self, sp: *mut c_void) -> Result<(), KernelError> { - let sp = self.state.stack.create_sp(sp)?; + pub fn set_waiter(&mut self, waiter: Option) { + self.waiter = waiter; + } + + pub fn waiter(&self) -> Option<&Waiter> { + self.waiter.as_ref() + } + + pub fn save_ctx(&mut self, ctx: *mut c_void) -> Result<()> { + let sp = self.state.stack.create_sp(ctx)?; self.state.stack.set_sp(sp); Ok(()) } - pub fn update_run_state(&mut self, state: RunState) { + pub fn set_run_state(&mut self, state: RunState) { self.state.run_state = state; } - pub fn timing(&self) -> &Timing { - &self.timing + pub fn rt_server(&self) -> Option<&RtServer> { + self.rt_server.as_ref() } - pub fn sp(&self) -> *mut c_void { + pub fn ctx(&self) -> *mut c_void { self.state.stack.sp() } - pub fn tuid(&self) -> ThreadUId { - self.tuid + pub fn uid(&self) -> UId { + self.uid + } + + pub fn task_id(&self) -> task::UId { + self.uid.tid().owner() } } -#[derive(Debug)] -pub struct ThreadMap { - map: IndexMap, +impl PartialEq for Thread { + fn eq(&self, other: &Self) -> bool { + self.uid == other.uid + } } -#[allow(dead_code)] -impl ThreadMap { - pub const fn new() -> Self { - Self { - map: IndexMap::new(), - } +impl Project for Thread { + fn project(&self) -> Option<&RtServer> { + self.rt_server.as_ref() } - pub fn create(&mut self, desc: ThreadDescriptor) -> Result { - let idx = self.map.find_empty().ok_or(KernelError::OutOfMemory)?; - let tuid = desc.tid.get_uid(idx); - let thread = Thread::new(tuid, desc.stack, desc.timing); + fn 
project_mut(&mut self) -> Option<&mut RtServer> { + self.rt_server.as_mut() + } +} - self.map.insert(&tuid, thread)?; - Ok(tuid) +impl Project for Thread { + fn project(&self) -> Option<&Waiter> { + self.waiter.as_ref() } - pub fn get_mut(&mut self, id: &ThreadUId) -> Option<&mut Thread> { - self.map.get_mut(id) + fn project_mut(&mut self) -> Option<&mut Waiter> { + self.waiter.as_mut() } +} + +#[cfg(test)] +mod tests { + use super::RtServer; - pub fn get(&self, id: &ThreadUId) -> Option<&Thread> { - self.map.get(id) + fn make_server(budget: u32, period: u32, deadline: u64) -> RtServer { + let tid = super::Id::new(1, super::task::KERNEL_TASK); + let uid = tid.get_uid(1); + RtServer::new(budget, period, deadline, uid) } - pub fn remove(&mut self, id: &ThreadUId) -> Option { - self.map.remove(id) + #[test] + fn replenish_budget_overflow() { + // 2 * budget = 4_294_967_296 > u32::MAX → overflows. + // In release: wraps to 0, which is less than budget. + let budget: u32 = u32::MAX / 2 + 1; + + let mut server = make_server(budget, 1, 0); + + server.replenish(); + server.budget_left(); } } diff --git a/src/sync/atomic.rs b/src/sync/atomic.rs index 487125c..a1f54be 100644 --- a/src/sync/atomic.rs +++ b/src/sync/atomic.rs @@ -10,11 +10,20 @@ compile_error!( "The `atomic-cas` feature requires the target to have atomic operations on at least 8-bit integers." 
); -// ----------------------------AtomicU8---------------------------- -#[cfg(all(feature = "no-atomic-cas"))] +#[allow(unused_imports)] pub use core::sync::atomic::Ordering; -#[cfg(all(feature = "no-atomic-cas"))] +#[inline(always)] +pub fn irq_free(f: impl FnOnce() -> T) -> T { + let state = hal::asm::disable_irq_save(); + let result = f(); + hal::asm::enable_irq_restr(state); + + result +} + +// ----------------------------AtomicU8---------------------------- +#[cfg(any(feature = "no-atomic-cas", not(target_has_atomic = "64")))] use core::cell::UnsafeCell; #[cfg(all(feature = "no-atomic-cas"))] @@ -106,3 +115,100 @@ impl AtomicBool { todo!("Implement atomic compare_exchange for bool"); } } + +// ----------------------------AtomicU64---------------------------- +#[allow(unused_imports)] +#[cfg(target_has_atomic = "64")] +pub use core::sync::atomic::AtomicU64; + +#[cfg(not(target_has_atomic = "64"))] +/// An atomic `u64` implemented by disabling interrupts around each operation. +pub struct AtomicU64 { + value: UnsafeCell, +} + +#[cfg(not(target_has_atomic = "64"))] +unsafe impl Sync for AtomicU64 {} + +#[cfg(not(target_has_atomic = "64"))] +impl AtomicU64 { + /// Creates a new atomic u64. + pub const fn new(value: u64) -> Self { + Self { + value: UnsafeCell::new(value), + } + } + + /// Loads the value. + pub fn load(&self, _: Ordering) -> u64 { + irq_free(|| { + // SAFETY: Interrupts are disabled, so this read is exclusive with writes. + unsafe { *self.value.get() } + }) + } + + /// Stores a value. + pub fn store(&self, value: u64, _: Ordering) { + irq_free(|| { + // SAFETY: Interrupts are disabled, so this write is exclusive with other access. + unsafe { + *self.value.get() = value; + } + }); + } + + /// Compares the value and exchanges it. + pub fn compare_exchange( + &self, + current: u64, + new: u64, + _: Ordering, + _: Ordering, + ) -> Result { + irq_free(|| { + // SAFETY: Interrupts are disabled, so this read-modify-write is exclusive. 
+ unsafe { + let value = self.value.get(); + if *value == current { + *value = new; + Ok(current) + } else { + Err(*value) + } + } + }) + } + + /// Fetches and adds, returning the previous value. + pub fn fetch_add(&self, value: u64, _: Ordering) -> u64 { + irq_free(|| { + // SAFETY: Interrupts are disabled, so this read-modify-write is exclusive. + unsafe { + let ptr = self.value.get(); + let old = *ptr; + *ptr = old.wrapping_add(value); + old + } + }) + } + + /// Fetches a value, applies the function and writes it back atomically. + pub fn fetch_update(&self, _: Ordering, _: Ordering, mut f: F) -> Result + where + F: FnMut(u64) -> Option, + { + irq_free(|| { + // SAFETY: Interrupts are disabled, so this read-modify-write is exclusive. + unsafe { + let ptr = self.value.get(); + let old = *ptr; + if let Some(new) = f(old) { + *ptr = new; + Ok(old) + } else { + Err(old) + } + } + }) + } +} diff --git a/src/sync/spinlock.rs b/src/sync/spinlock.rs index 7e1b2de..14178cf 100644 --- a/src/sync/spinlock.rs +++ b/src/sync/spinlock.rs @@ -6,7 +6,7 @@ use core::sync::atomic::AtomicBool; use core::sync::atomic::Ordering; /// A mutual exclusion primitive, facilitating busy-waiting. -#[derive(Debug)] +#[proc_macros::fmt] pub struct SpinLock { lock: AtomicBool, } @@ -56,7 +56,7 @@ impl SpinLock { } /// A guard that releases the SpinLock when dropped. -#[derive(Debug)] +#[proc_macros::fmt] pub struct SpinLockGuard<'a, T: ?Sized> { lock: &'a SpinLock, value: NonNull, diff --git a/src/syscalls.rs b/src/syscalls.rs index eda3f92..d692f26 100644 --- a/src/syscalls.rs +++ b/src/syscalls.rs @@ -3,15 +3,16 @@ use core::ffi::{c_int, c_uint}; mod file; -mod tasks; +mod sched; // We need to import everything so that the macro is able to find the entry functions. 
use file::*; -use tasks::*; +use sched::*; #[unsafe(no_mangle)] pub extern "C" fn handle_syscall(number: usize, args: *const c_uint) -> c_int { + let number = number as u16; // All functions that are annotated with the #[syscall_handler(num = X)] macro are syscalls. // build.rs will generate a match statement that matches the syscall number to the function which is then included here. - include!(concat!(env!("OUT_DIR"), "/syscall_dispatcher.in")) + include!(concat!(env!("OUT_DIR"), "/syscall_match.in")) } diff --git a/src/syscalls/file.rs b/src/syscalls/file.rs index 65131b9..957a765 100644 --- a/src/syscalls/file.rs +++ b/src/syscalls/file.rs @@ -1,7 +1,5 @@ use core::{ffi::c_int, str}; - -use crate::kprintln; -use macros::syscall_handler; +use proc_macros::syscall_handler; #[syscall_handler(num = 0)] fn syscall_print(fd: usize, buf: *const u8, len: usize) -> c_int { diff --git a/src/syscalls/sched.rs b/src/syscalls/sched.rs new file mode 100644 index 0000000..258574e --- /dev/null +++ b/src/syscalls/sched.rs @@ -0,0 +1,72 @@ +//! This module provides task management related syscalls. 
+ +use core::ffi::c_int; + +use proc_macros::syscall_handler; + +use crate::{sched, time, uapi::sched::RtAttrs}; + +#[syscall_handler(num = 1)] +fn sleep(until_hi: u32, until_lo: u32) -> c_int { + let until = ((until_hi as u64) << 32) | (until_lo as u64); + sched::with(|sched| { + if sched.sleep_until(until, time::tick()).is_err() { + bug!("no current thread set."); + } + }); + 0 +} + +#[syscall_handler(num = 2)] +fn sleep_for(duration_hi: u32, duration_lo: u32) -> c_int { + let duration = ((duration_hi as u64) << 32) | (duration_lo as u64); + sched::with(|sched| { + let now = time::tick(); + if sched.sleep_until(now + duration, now).is_err() { + bug!("no current thread set."); + } + }); + 0 +} + +#[syscall_handler(num = 3)] +fn spawn_thread(func_ptr: usize, attrs: *const RtAttrs) -> c_int { + sched::with(|sched| { + let attrs = if attrs.is_null() { + None + } else { + Some(unsafe { *attrs }) + }; + + let attrs = sched::thread::Attributes { + entry: unsafe { core::mem::transmute(func_ptr) }, + fin: None, + attrs, + }; + match sched.create_thread(None, &attrs) { + Ok(uid) => { + if sched.enqueue(time::tick(), uid).is_err() { + bug!("failed to enqueue thread."); + } + uid.as_usize() as c_int + } + Err(_) => -1, + } + }) +} + +#[syscall_handler(num = 4)] +fn exit(code: usize) -> c_int { + sched::with(|sched| { + if sched.kill_thread(None).is_err() { + bug!("failed to terminate thread."); + } + }); + 0 +} + +#[syscall_handler(num = 5)] +fn kick_thread(uid: usize) -> c_int { + 0 +} + diff --git a/src/syscalls/tasks.rs b/src/syscalls/tasks.rs deleted file mode 100644 index 7bd7b5a..0000000 --- a/src/syscalls/tasks.rs +++ /dev/null @@ -1,32 +0,0 @@ -//! This module provides task management related syscalls. - -use core::ffi::c_int; - -use crate::sched; -use macros::syscall_handler; - -/// Syscall handler: reschedule. -/// This syscall is used to request a reschedule. -/// -/// No arguments are passed to this syscall. 
-#[syscall_handler(num = 1)] -fn syscall_reschedule() -> c_int { - sched::reschedule(); - 0 -} - -#[syscall_handler(num = 2)] -fn syscall_exec(entry: usize) -> c_int { - let entry: extern "C" fn() -> () = unsafe { core::mem::transmute(entry) }; - - let timing = sched::thread::Timing { - period: 8, - deadline: 8, - exec_time: 2, - }; - - sched::create_task(sched::task::TaskDescriptor { mem_size: 0 }) - .and_then(|task| sched::create_thread(task, entry, None, timing)) - .map(|_| 0) - .unwrap_or(-1) -} diff --git a/src/time.rs b/src/time.rs index f8a2fc4..44cc3a5 100644 --- a/src/time.rs +++ b/src/time.rs @@ -1,49 +1,29 @@ -use crate::{sched, sync::spinlock::SpinLocked}; -use hal::Schedable; +use hal::Machinelike; -// This variable is only allowed to be modified by the systick handler. -static TIME: SpinLocked = SpinLocked::new(0); +use crate::{sched, sync}; -fn tick() { - // Increment the global time counter. - { - let mut time = TIME.lock(); - *time += 1; - } +static TICKS: sync::atomic::AtomicU64 = sync::atomic::AtomicU64::new(0); + +pub fn tick() -> u64 { + TICKS.load(sync::atomic::Ordering::Acquire) } -/* - * Returns the current time in milliseconds after boot. - * - */ -#[allow(dead_code)] -pub fn time() -> u64 { - if !hal::asm::are_interrupts_enabled() { - // If interrupts are disabled, we can just read the time. - return *TIME.lock(); - } else { - let time; - // We need to disable interrupts to ensure that systick is always able to lock the time. - hal::asm::disable_interrupts(); - // Return the current time. - { - time = *TIME.lock(); - } - hal::asm::enable_interrupts(); - // Now systick can be called again. - time - } +pub fn mono_now() -> u64 { + // TODO: This will break on SMP systems without native u64 atomic store. 
+ sync::atomic::irq_free(|| hal::Machine::monotonic_now() ) +} + +pub fn mono_freq() -> u64 { + hal::Machine::monotonic_freq() } /// cbindgen:ignore /// cbindgen:no-export #[unsafe(no_mangle)] pub extern "C" fn systick_hndlr() { - tick(); - - let resched = { sched::tick_scheduler() }; + let tick = TICKS.fetch_add(1, sync::atomic::Ordering::Release) + 1; - if resched { - hal::Machine::trigger_reschedule(); + if sched::needs_reschedule(tick) { + sched::reschedule(); } } diff --git a/src/types.rs b/src/types.rs new file mode 100644 index 0000000..746bf6d --- /dev/null +++ b/src/types.rs @@ -0,0 +1,9 @@ + +pub mod boxed; +pub mod array; +pub mod heap; +pub mod pool; +pub mod rbtree; +pub mod list; +pub mod traits; +pub mod view; \ No newline at end of file diff --git a/src/mem/array.rs b/src/types/array.rs similarity index 52% rename from src/mem/array.rs rename to src/types/array.rs index e7274db..1f08e56 100644 --- a/src/mem/array.rs +++ b/src/types/array.rs @@ -1,18 +1,28 @@ //! This module implements static and dynamic arrays for in-kernel use. -use super::boxed::Box; -use crate::utils::KernelError; +use crate::error::Result; + +use super::{ + traits::{Get, GetMut, ToIndex}, + boxed::Box, +}; + use core::{borrow::Borrow, mem::MaybeUninit}; +use core::{ + ops::{Index, IndexMut}, +}; /// This is a fixed-size map that can store up to N consecutive elements. -#[derive(Debug)] -pub struct IndexMap + Default, V, const N: usize> { +#[proc_macros::fmt] +pub struct IndexMap +{ data: [Option; N], phantom: core::marker::PhantomData, } #[allow(dead_code)] -impl + Default, V, const N: usize> IndexMap { +impl IndexMap +{ /// Create a new IndexMap. /// /// Returns a new IndexMap. @@ -23,50 +33,20 @@ impl + Default, V, const N: usize> IndexMap { } } - /// Get the element at the given index. - /// - /// `index` - The index to get the element from. - /// - /// Returns `Some(&T)` if the index is in-bounds, otherwise `None`. 
- pub fn get(&self, index: &K) -> Option<&V> { - let index = *index.borrow(); - - if index < N { - self.data[index].as_ref() - } else { - None - } - } - - /// Get a mutable reference to the element at the given index. - /// - /// `index` - The index to get the element from. - /// - /// Returns `Some(&mut T)` if the index is in-bounds, otherwise `None`. - pub fn get_mut(&mut self, index: &K) -> Option<&mut V> { - let index = *index.borrow(); - - if index < N { - self.data[index].as_mut() - } else { - None - } - } - /// Insert a value at the given index. /// /// `index` - The index to insert the value at. /// `value` - The value to insert. /// /// Returns `Ok(())` if the index was in-bounds, otherwise `Err(KernelError::OutOfMemory)`. - pub fn insert(&mut self, index: &K, value: V) -> Result<(), KernelError> { - let index = *index.borrow(); + pub fn insert(&mut self, idx: &K, value: V) -> Result<()> { + let idx = K::to_index(Some(idx)); - if index < N { - self.data[index] = Some(value); + if idx < N { + self.data[idx] = Some(value); Ok(()) } else { - Err(KernelError::OutOfMemory) + Err(kerr!(OutOfMemory)) } } @@ -75,7 +55,7 @@ impl + Default, V, const N: usize> IndexMap { /// `value` - The value to insert. /// /// Returns `Ok(index)` if the value was inserted, otherwise `Err(KernelError::OutOfMemory)`. - pub fn insert_next(&mut self, value: V) -> Result { + pub fn insert_next(&mut self, value: V) -> Result { for (i, slot) in self.data.iter_mut().enumerate() { if slot.is_none() { *slot = Some(value); @@ -83,7 +63,7 @@ impl + Default, V, const N: usize> IndexMap { } } - Err(KernelError::OutOfMemory) + Err(kerr!(OutOfMemory)) } /// Remove the value at the given index. @@ -91,11 +71,11 @@ impl + Default, V, const N: usize> IndexMap { /// `index` - The index to remove the value from. /// /// Returns the value if it was removed, otherwise `None`. 
- pub fn remove(&mut self, index: &K) -> Option { - let index = *index.borrow(); + pub fn remove(&mut self, idx: &K) -> Option { + let idx = K::to_index(Some(idx)); - if index < N { - self.data[index].take() + if idx < N { + self.data[idx].take() } else { None } @@ -113,8 +93,8 @@ impl + Default, V, const N: usize> IndexMap { /// `index` - The index to start the iterator from. /// /// Returns an iterator over the elements in the map. - pub fn iter_from_cycle(&self, index: &K) -> impl Iterator> { - self.data.iter().cycle().skip(index.borrow() + 1) + pub fn iter_from_cycle(&self, idx: Option<&K>) -> impl Iterator> { + self.data.iter().cycle().skip(K::to_index(idx) + 1) } /// Get the next index that contains a value (this will cycle). @@ -122,19 +102,33 @@ impl + Default, V, const N: usize> IndexMap { /// `index` - The index to start the search from. /// /// Returns the next index (potentially < index) that contains a value, otherwise `None`. - pub fn next(&self, index: Option<&K>) -> Option { - let default = K::default(); - let index = index.unwrap_or(&default); - - for (i, elem) in self.iter_from_cycle(index).enumerate() { + pub fn next(&self, idx: Option<&K>) -> Option { + for (i, elem) in self.iter_from_cycle(idx).enumerate() { if elem.is_some() { - return Some((index.borrow() + i + 1) % N); + let idx = K::to_index(idx); + return Some((idx + i + 1) % N); } } None } + pub fn raw_at(&self, idx: usize) -> Option<&V> { + if idx < N { + self.data[idx].as_ref() + } else { + None + } + } + + pub fn raw_at_mut(&mut self, idx: usize) -> Option<&mut V> { + if idx < N { + self.data[idx].as_mut() + } else { + None + } + } + pub fn find_empty(&self) -> Option { for (i, slot) in self.data.iter().enumerate() { if slot.is_none() { @@ -146,8 +140,95 @@ impl + Default, V, const N: usize> IndexMap { } } +impl Index for IndexMap +{ + type Output = V; + + fn index(&self, index: K) -> &Self::Output { + self.get::(index).unwrap() + } +} + +impl IndexMut for IndexMap +{ + fn 
index_mut(&mut self, index: K) -> &mut Self::Output { + self.get_mut::(index).unwrap() + } +} + +impl Get for IndexMap +{ + type Output = V; + + fn get>(&self, index: Q) -> Option<&Self::Output> { + let idx = K::to_index(Some(index.borrow())); + if idx < N { + self.data[idx].as_ref() + } else { + None + } + } +} + +impl GetMut for IndexMap { + fn get_mut>(&mut self, index: Q) -> Option<&mut Self::Output> { + let idx = K::to_index(Some(index.borrow())); + if idx < N { + self.data[idx].as_mut() + } else { + None + } + } + + fn get2_mut>(&mut self, index1: Q, index2: Q) -> (Option<&mut Self::Output>, Option<&mut Self::Output>) { + let idx1 = K::to_index(Some(index1.borrow())); + let idx2 = K::to_index(Some(index2.borrow())); + + if idx1 == idx2 { + debug_assert!(false, "get2_mut called with identical indices"); + return (None, None); + } + + let (left, right) = self.data.split_at_mut(idx1.max(idx2)); + + if idx1 < idx2 { + let elem1 = left[idx1].as_mut(); + let elem2 = right[0].as_mut(); + (elem1, elem2) + } else { + let elem1 = right[0].as_mut(); + let elem2 = left[idx2].as_mut(); + (elem1, elem2) + } + } + + fn get3_mut>( + &mut self, + index1: Q, + index2: Q, + index3: Q, + ) -> (Option<&mut Self::Output>, Option<&mut Self::Output>, Option<&mut Self::Output>) { + let idx1 = K::to_index(Some(index1.borrow())); + let idx2 = K::to_index(Some(index2.borrow())); + let idx3 = K::to_index(Some(index3.borrow())); + + if idx1 == idx2 || idx1 == idx3 || idx2 == idx3 { + debug_assert!(false, "get3_mut called with identical indices"); + return (None, None, None); + } + + let ptr1 = &mut self.data[idx1] as *mut Option; + let ptr2 = &mut self.data[idx2] as *mut Option; + let ptr3 = &mut self.data[idx3] as *mut Option; + + // Safety: the elements at index1, index2 and index3 are nowhere else borrowed mutably by function contract. + // And they are disjoint because of the check above. 
+ unsafe { ((*ptr1).as_mut(), (*ptr2).as_mut(), (*ptr3).as_mut()) } + } +} + /// This is a vector that can store up to N elements inline and will allocate on the heap if more are needed. -#[derive(Debug)] +#[proc_macros::fmt] pub struct Vec { len: usize, data: [MaybeUninit; N], @@ -172,7 +253,7 @@ impl Vec { /// `additional` - The additional space to reserve. /// /// Returns `Ok(())` if the space was reserved, otherwise `Err(KernelError::OutOfMemory)`. - pub fn reserve(&mut self, additional: usize) -> Result<(), KernelError> { + pub fn reserve(&mut self, additional: usize) -> Result<()> { let len_extra = self.extra.len(); // Check if we have enough space in the inline storage. @@ -181,11 +262,11 @@ impl Vec { } // If we don't have enough space, we need to grow the extra storage. - let grow = additional - N + len_extra; + let grow = self.len + additional - N; let mut new_extra = Box::new_slice_uninit(grow)?; // Check that the new extra storage has the requested length. - BUG_ON!(new_extra.len() != grow); + bug_on!(new_extra.len() != grow); // Copy the old extra storage into the new one. new_extra[..len_extra].copy_from_slice(&self.extra); @@ -200,7 +281,7 @@ impl Vec { /// `total_capacity` - The total space to be reserved. /// /// Returns `Ok(())` if the space was reserved, otherwise `Err(KernelError::OutOfMemory)`. - pub fn reserve_total_capacity(&mut self, total_capacity: usize) -> Result<(), KernelError> { + pub fn reserve_total_capacity(&mut self, total_capacity: usize) -> Result<()> { // Check if we already have enough space if self.capacity() >= total_capacity { return Ok(()); @@ -211,7 +292,7 @@ impl Vec { let mut new_extra = Box::new_slice_uninit(new_out_of_line_cap)?; // Check that the new extra storage has the requested length. - BUG_ON!(new_extra.len() != new_out_of_line_cap); + bug_on!(new_extra.len() != new_out_of_line_cap); let curr_out_of_line_size = self.extra.len(); // Copy the old extra storage into the new one. 
@@ -228,7 +309,7 @@ impl Vec { /// `value` - The value to initialize the elements in the Vec with. /// /// Returns the new Vec or `Err(KernelError::OutOfMemory)` if the allocation failed. - pub fn new_init(length: usize, value: T) -> Result { + pub fn new_init(length: usize, value: T) -> Result { let mut vec = Self::new(); // Check if we can fit all elements in the inline storage. @@ -264,7 +345,7 @@ impl Vec { /// `value` - The value to push. /// /// Returns `Ok(())` if the value was pushed, otherwise `Err(KernelError::OutOfMemory)`. - pub fn push(&mut self, value: T) -> Result<(), KernelError> { + pub fn push(&mut self, value: T) -> Result<()> { // Check if we have enough space in the inline storage. if self.len < N { // Push the value into the inline storage. @@ -285,7 +366,7 @@ impl Vec { let grow = (len_extra + 1) * 2; let mut new_extra = Box::new_slice_uninit(grow)?; - BUG_ON!(new_extra.len() != grow); + bug_on!(new_extra.len() != grow); // Copy the old extra storage into the new one. new_extra[..len_extra].copy_from_slice(&self.extra); @@ -368,7 +449,7 @@ impl Vec { /// Returns `Some(&T)` if the index is in-bounds, otherwise `None`. pub fn at(&self, index: usize) -> Option<&T> { // Check if the index is in-bounds. - if index > self.len - 1 { + if index >= self.len { return None; } @@ -403,6 +484,66 @@ impl Vec { } } + fn at_mut_unchecked(&mut self, index: usize) -> *mut T { + if index < N { + // Safety: the elements until self.len are initialized. + // The element at index is nowhere else borrowed mutably by function contract. + self.data[index].as_mut_ptr() + } else { + let index = index - N; + // Safety: the elements until self.len - N are initialized. + // The element at index is nowhere else borrowed mutably by function contract. + self.extra[index].as_mut_ptr() + } + } + + /// Get disjoint mutable references to the values at the given indices. + /// + /// `index1` - The first index. + /// `index2` - The second index. 
+ /// + /// Returns `Some(&mut T, &mut T)` if the indices are in-bounds and disjoint, otherwise `None`. + pub fn at2_mut(&mut self, index1: usize, index2: usize) -> (Option<&mut T>, Option<&mut T>) { + if index1 == index2 { + debug_assert!(false, "at2_mut called with identical indices"); + return (None, None); + } + + let ptr1 = self.at_mut_unchecked(index1); + let ptr2 = self.at_mut_unchecked(index2); + + // Safety: the elements at index1 and index2 are nowhere else borrowed mutably by function contract. + // And they are disjoint because of the check above. + unsafe { (Some(&mut *ptr1), Some(&mut *ptr2)) } + } + + /// Get disjoint mutable references to the values at the given indices. + /// + /// `index1` - The first index. + /// `index2` - The second index. + /// `index3` - The third index. + /// + /// Returns `Some(&mut T, &mut T, &mut T)` if the indices are in-bounds and disjoint, otherwise `None`. + pub fn at3_mut( + &mut self, + index1: usize, + index2: usize, + index3: usize, + ) -> (Option<&mut T>, Option<&mut T>, Option<&mut T>) { + if index1 == index2 || index1 == index3 || index2 == index3 { + debug_assert!(false, "at3_mut called with identical indices"); + return (None, None, None); + } + + let ptr1 = self.at_mut_unchecked(index1); + let ptr2 = self.at_mut_unchecked(index2); + let ptr3 = self.at_mut_unchecked(index3); + + // Safety: the elements at index1, index2 and index3 are nowhere else borrowed mutably by function contract. + // And they are disjoint because of the check above. + unsafe { (Some(&mut *ptr1), Some(&mut *ptr2), Some(&mut *ptr3)) } + } + /// Swap the values at the given indices. /// /// `a` - The first index. @@ -456,8 +597,9 @@ impl Drop for Vec { } // Drop all elements in the extra storage. - for elem in &mut (*self.extra)[0..self.len - N] { - // Safety: the elements until self.len - N are initialized. 
+ let extra_init = self.len.saturating_sub(N).min(self.extra.len()); + for elem in &mut (*self.extra)[0..extra_init] { + // Safety: the elements until extra_init are initialized. unsafe { elem.assume_init_drop(); } @@ -465,7 +607,151 @@ impl Drop for Vec { } } +impl Index for Vec { + type Output = T; + + fn index(&self, index: usize) -> &Self::Output { + self.at(index).unwrap() + } +} + +impl IndexMut for Vec { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + self.at_mut(index).unwrap() + } +} + +impl Get for Vec { + type Output = T; + + fn get>(&self, index: Q) -> Option<&Self::Output> { + self.at(*index.borrow()) + } +} + +impl GetMut for Vec { + fn get_mut>(&mut self, index: Q) -> Option<&mut Self::Output> { + self.at_mut(*index.borrow()) + } + + fn get2_mut>( + &mut self, + index1: Q, + index2: Q, + ) -> (Option<&mut Self::Output>, Option<&mut Self::Output>) { + self.at2_mut(*index1.borrow(), *index2.borrow()) + } + + fn get3_mut>( + &mut self, + index1: Q, + index2: Q, + index3: Q, + ) -> (Option<&mut Self::Output>, Option<&mut Self::Output>, Option<&mut Self::Output>) { + self.at3_mut(*index1.borrow(), *index2.borrow(), *index3.borrow()) + } +} + + +#[cfg(test)] +mod tests { + use super::{Vec, IndexMap}; + + #[test] + fn no_length_underflow() { + let vec = Vec::::new(); + assert!(vec.len() == 0); + + // If the length check is wrong, at(0) would panic due to an underflow + assert_eq!(vec.at(0), None); + } + + #[test] + fn reserve_underflow() { + // N=8, fill 7 elements so len=7 and extra.len()=0. + // reserve(2): 7+2=9 > 8+0=8, needs grow. grow = 2 - 8 + 0 = underflow. 
+ let mut vec = Vec::::new(); + for i in 0..7usize { + vec.push(i).unwrap(); + } + assert_eq!(vec.len(), 7); + + // FIXME: Gives OOM error + vec.reserve(2).unwrap(); + } + + #[test] + fn drop_underflow() { + let mut vec = Vec::::new(); + for i in 0..7usize { vec.push(i).unwrap(); } + // Drop used to panic here + drop(vec); + } + + #[test] + fn index_map_iter_cycle_max() { + let m: IndexMap = IndexMap::new(); + let _ = m.iter_from_cycle(Some(&usize::MAX)).next(); + } +} + + + #[cfg(kani)] mod verification { - use super::*; + use super::IndexMap; + use crate::types::traits::Get; + + /// Verify that insert followed by get returns the inserted value. + #[kani::proof] + fn verify_insert_get_roundtrip() { + let mut m: IndexMap = IndexMap::new(); + let idx: usize = kani::any(); + kani::assume(idx < 8); + let val: u32 = kani::any(); + + m.insert(&idx, val).unwrap(); + assert_eq!(m.get(&idx), Some(&val)); + } + + /// Verify that removing an inserted value leaves the slot empty. + #[kani::proof] + fn verify_remove_clears_slot() { + let mut m: IndexMap = IndexMap::new(); + let idx: usize = kani::any(); + kani::assume(idx < 8); + let val: u32 = kani::any(); + + m.insert(&idx, val).unwrap(); + let removed = m.remove(&idx); + assert_eq!(removed, Some(val)); + assert_eq!(m.get(&idx), None); + } + + /// Verify iter_from_cycle does not overflow when idx is a valid in-bounds index. + #[kani::proof] + #[kani::unwind(16)] + fn verify_iter_from_cycle_no_overflow_valid_idx() { + let m: IndexMap = IndexMap::new(); + let idx: usize = kani::any(); + kani::assume(idx < 8); // only valid indices + + // iter_from_cycle computes to_index(Some(idx)) + 1 = idx + 1 (≤ 8, no overflow) + let _ = m.iter_from_cycle(Some(&idx)).next(); + } + + /// Verify next() does not overflow when idx is a valid in-bounds index. + #[kani::proof] + #[kani::unwind(16)] + fn verify_next_no_overflow_valid_idx() { + let mut m: IndexMap = IndexMap::new(); + // Fill all slots so next() iterates the maximum i times. 
+ for i in 0usize..8 { + m.insert(&i, i as u32).unwrap(); + } + let idx: usize = kani::any(); + kani::assume(idx < 8); + let result = m.next(Some(&idx)); + assert!(result.is_some()); // map is full, must find something + } } diff --git a/src/mem/boxed.rs b/src/types/boxed.rs similarity index 82% rename from src/mem/boxed.rs rename to src/types/boxed.rs index c2af5d7..012245b 100644 --- a/src/mem/boxed.rs +++ b/src/types/boxed.rs @@ -1,7 +1,7 @@ //! This module provides a simple heap-allocated memory block for in-kernel use. -use super::{free, malloc}; -use crate::utils::KernelError; +use crate::{error::Result, mem}; + use core::{ mem::{MaybeUninit, forget}, ops::{Deref, DerefMut, Index, IndexMut, Range, RangeFrom, RangeTo}, @@ -9,7 +9,7 @@ use core::{ }; /// A heap-allocated memory block. -#[derive(Debug)] +#[proc_macros::fmt] pub struct Box { /// Pointer to the heap-allocated memory. /// This is uniquely owned, so no covariance issues. @@ -23,18 +23,18 @@ impl Box<[T]> { /// `len` - The length of the slice. /// /// Returns a new heap-allocated slice with the given length or an error if the allocation failed. - pub fn new_slice_zeroed(len: usize) -> Result { + pub fn new_slice_zeroed(len: usize) -> Result { if len == 0 { return Ok(Self::new_slice_empty()); } - if let Some(ptr) = malloc(size_of::() * len, align_of::()) { + if let Some(ptr) = mem::malloc(size_of::() * len, align_of::()) { let ptr = slice_from_raw_parts_mut(ptr.as_ptr().cast(), len); Ok(Self { ptr: unsafe { NonNull::new_unchecked(ptr) }, }) } else { - Err(KernelError::OutOfMemory) + Err(kerr!(OutOfMemory)) } } @@ -53,8 +53,8 @@ impl Box<[T]> { /// `len` - The length of the slice. /// /// Returns a new heap-allocated slice with the given length or an error if the allocation failed. 
- pub fn new_slice_uninit(len: usize) -> Result]>, KernelError> { - if let Some(ptr) = malloc( + pub fn new_slice_uninit(len: usize) -> Result]>> { + if let Some(ptr) = mem::malloc( size_of::>() * len, align_of::>(), ) { @@ -63,7 +63,7 @@ impl Box<[T]> { ptr: unsafe { NonNull::new_unchecked(ptr) }, }) } else { - Err(KernelError::OutOfMemory) + Err(kerr!(OutOfMemory)) } } } @@ -76,7 +76,7 @@ impl Box { /// /// Returns a new heap-allocated value or `None` if the allocation failed. pub fn new(value: T) -> Option { - if let Some(ptr) = malloc(size_of::(), align_of::()) { + if let Some(ptr) = mem::malloc(size_of::(), align_of::()) { unsafe { write(ptr.as_ptr().cast(), value); } @@ -139,7 +139,7 @@ impl Drop for Box { } drop_in_place(self.ptr.as_ptr()); - free(self.ptr.cast(), size); + mem::free(self.ptr.cast(), size); } } } @@ -239,39 +239,3 @@ impl AsMut for Box { self.as_mut() } } - -#[cfg(kani)] -mod verification { - use crate::mem::alloc; - - use super::*; - - /* - fn alloc_range(length: usize) -> Option> { - let alloc_range = std::alloc::Layout::from_size_align(length, align_of::()).unwrap(); - let ptr = unsafe { std::alloc::alloc(alloc_range) }; - - if ptr.is_null() || ((ptr as usize) >= isize::MAX as usize - length) { - None - } else { - Some(ptr as usize..ptr as usize + length) - } - } - - #[kani::proof] - fn proof_new_slice_zero() { - let mut allocator = alloc::BestFitAllocator::new(); - allocator - - let len = kani::any(); - kani::assume(len < alloc::MAX_ADDR); - - let b = Box::::new_slice_zeroed(len); - - let index = kani::any(); - kani::assume(index < len); - - assert!(b[index] == 0); - } - */ -} diff --git a/src/types/heap.rs b/src/types/heap.rs new file mode 100644 index 0000000..62de196 --- /dev/null +++ b/src/types/heap.rs @@ -0,0 +1,226 @@ +//! This module provides a binary heap implementation. + +use crate::error::Result; + +use super::array::Vec; + +/// An array-based binary heap, with N elements stored inline. 
+#[proc_macros::fmt] +pub struct BinaryHeap { + vec: Vec, +} + +impl BinaryHeap { + /// Create a new empty binary heap. + pub const fn new() -> Self { + Self { vec: Vec::new() } + } + + /// Push a value onto the binary heap. + /// + /// `value` - The value to push onto the binary heap. + /// + /// Returns `Ok(())` if the value was pushed onto the binary heap, or an error if the heap cannot be extended (e.g. OOM). + pub fn push(&mut self, value: T) -> Result<()> { + self.vec.push(value)?; + self.sift_up(self.len() - 1); + Ok(()) + } + + /// Pop the smallest value from the binary heap. + /// + /// Returns the smallest value in the binary heap, or `None` if the heap is empty. + pub fn pop(&mut self) -> Option { + if self.is_empty() { + return None; + } + + let value = self.peek().cloned(); + self.vec.swap(0, self.len() - 1); + self.vec.pop(); + self.sift_down(0); + value + } + + /// Sift the value at the given index up the binary heap. + /// + /// `index` - The index of the value to sift up. + fn sift_up(&mut self, mut index: usize) { + // We move up the heap until we reach the root or the parent is smaller than the current value. + while index > 0 { + let parent = (index - 1) / 2; + if self.vec.at(parent) <= self.vec.at(index) { + break; + } + self.vec.swap(parent, index); + index = parent; + } + } + + /// Sift the value at the given index down the binary heap. + /// + /// `index` - The index of the value to sift down. + fn sift_down(&mut self, mut index: usize) { + // We move down the heap until we reach a leaf or the value is smaller than both children. 
+ while index < self.len() { + let left = 2 * index + 1; + let right = 2 * index + 2; + let mut smallest = index; + + if left < self.len() && self.vec.at(left) < self.vec.at(smallest) { + smallest = left; + } + + if right < self.len() && self.vec.at(right) < self.vec.at(smallest) { + smallest = right; + } + + if smallest == index { + break; + } + + self.vec.swap(smallest, index); + index = smallest; + } + } + + /// Check if the binary heap is empty. + /// + /// Returns `true` if the binary heap is empty, `false` otherwise. + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Peek at the smallest value in the binary heap. + /// + /// Returns the smallest value in the binary heap, or `None` if the heap is empty. + pub fn peek(&self) -> Option<&T> { + if self.is_empty() { + return None; + } + self.vec.at(0) + } + + /// Get the number of elements in the binary heap. + pub fn len(&self) -> usize { + self.vec.len() + } +} + + +#[cfg(kani)] +mod verification { + use super::BinaryHeap; + + /// Verify that pushing a single element and popping it returns the same element. + #[kani::proof] + #[kani::unwind(5)] + fn verify_push_pop_roundtrip() { + let mut heap = BinaryHeap::::new(); + let v: u32 = kani::any(); + heap.push(v).unwrap(); + let popped = heap.pop(); + assert_eq!(popped, Some(v)); + assert!(heap.is_empty()); + } + + /// Verify that pushing two elements and popping gives the smaller one first (min-heap). + #[kani::proof] + #[kani::unwind(5)] + fn verify_min_heap_two_elements() { + let mut heap = BinaryHeap::::new(); + let a: u32 = kani::any(); + let b: u32 = kani::any(); + heap.push(a).unwrap(); + heap.push(b).unwrap(); + let first = heap.pop().unwrap(); + let second = heap.pop().unwrap(); + // Min-heap: first <= second, and {first, second} == {a, b} + assert!(first <= second); + assert!((first == a && second == b) || (first == b && second == a)); + } + + /// Verify that pushing three elements pops them in non-decreasing order. 
+ #[kani::proof] + #[kani::unwind(6)] + fn verify_min_heap_three_elements_sorted() { + let mut heap = BinaryHeap::::new(); + let a: u32 = kani::any(); + let b: u32 = kani::any(); + let c: u32 = kani::any(); + heap.push(a).unwrap(); + heap.push(b).unwrap(); + heap.push(c).unwrap(); + let x = heap.pop().unwrap(); + let y = heap.pop().unwrap(); + let z = heap.pop().unwrap(); + // Must come out in non-decreasing order. + assert!(x <= y); + assert!(y <= z); + } + + /// Verify that peek() always returns the minimum element after arbitrary pushes. + #[kani::proof] + #[kani::unwind(6)] + fn verify_peek_is_minimum() { + let mut heap = BinaryHeap::::new(); + let a: u32 = kani::any(); + let b: u32 = kani::any(); + let c: u32 = kani::any(); + heap.push(a).unwrap(); + heap.push(b).unwrap(); + heap.push(c).unwrap(); + let peeked = *heap.peek().unwrap(); + // peeked must be <= all elements + assert!(peeked <= a); + assert!(peeked <= b); + assert!(peeked <= c); + } +} + + +#[cfg(test)] +mod tests { + use super::BinaryHeap; + + #[test] + fn test_heap_sorted_order() { + let mut heap = BinaryHeap::::new(); + for &v in &[5u32, 2, 8, 1, 9, 3] { + heap.push(v).unwrap(); + } + let mut prev = 0u32; + while let Some(v) = heap.pop() { + assert!(v >= prev, "heap pop out of order: {} after {}", v, prev); + prev = v; + } + } + + #[test] + fn test_heap_single_element() { + let mut heap = BinaryHeap::::new(); + heap.push(42).unwrap(); + assert_eq!(heap.peek(), Some(&42)); + assert_eq!(heap.pop(), Some(42)); + assert!(heap.is_empty()); + } + + #[test] + fn test_heap_empty_peek_pop() { + let mut heap = BinaryHeap::::new(); + assert!(heap.peek().is_none()); + assert!(heap.pop().is_none()); + } + + #[test] + fn test_heap_duplicate_values() { + let mut heap = BinaryHeap::::new(); + heap.push(3).unwrap(); + heap.push(3).unwrap(); + heap.push(1).unwrap(); + assert_eq!(heap.pop(), Some(1)); + assert_eq!(heap.pop(), Some(3)); + assert_eq!(heap.pop(), Some(3)); + } +} + diff --git a/src/types/list.rs 
b/src/types/list.rs new file mode 100644 index 0000000..f8f3f1b --- /dev/null +++ b/src/types/list.rs @@ -0,0 +1,608 @@ +use core::marker::PhantomData; + +use crate::error::Result; + +use super::traits::{Get, GetMut}; + +#[allow(dead_code)] +pub struct List { + head: Option, + tail: Option, + len: usize, + _tag: PhantomData, +} + +#[allow(dead_code)] +pub trait Linkable { + fn links(&self) -> &Links; + fn links_mut(&mut self) -> &mut Links; +} + +#[allow(dead_code)] +#[proc_macros::fmt] +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct Links { + prev: Option, + next: Option, + _tag: PhantomData, +} + +#[allow(dead_code)] +impl Links { + pub const fn new() -> Self { + Self { + prev: None, + next: None, + _tag: PhantomData, + } + } +} + +#[allow(dead_code)] +impl List { + pub const fn new() -> Self { + Self { + head: None, + tail: None, + len: 0, + _tag: PhantomData, + } + } + + pub fn head(&self) -> Option { + self.head + } + + pub fn tail(&self) -> Option { + self.tail + } + + pub fn len(&self) -> usize { + self.len + } + + pub fn is_empty(&self) -> bool { + self.len == 0 + } + + pub fn push_front + GetMut>(&mut self, id: T, storage: &mut S) -> Result<()> + where + >::Output: Linkable, + { + self.detach_links(id, storage)?; + + match self.head { + Some(old_head) => { + let (new_node, old_head_node) = storage.get2_mut(id, old_head); + let (new_node, old_head_node) = (new_node.ok_or(kerr!(NotFound))?, old_head_node.unwrap_or_else(|| { + bug!("node linked from list does not exist in storage."); + })); + + new_node.links_mut().prev = None; + new_node.links_mut().next = Some(old_head); + + old_head_node.links_mut().prev = Some(id); + } + None => { + let new_node = storage.get_mut(id).ok_or(kerr!(NotFound))?; + new_node.links_mut().prev = None; + new_node.links_mut().next = None; + self.tail = Some(id); + } + } + + self.head = Some(id); + self.len += 1; + Ok(()) + } + + /// Pushes `id` to the back of the list. If `id` is already in the list, it is moved to the back. 
+ /// + /// Errors if `id` does not exist in `storage` or if the node corresponding to `id` is linked but not in the list. + pub fn push_back + GetMut>(&mut self, id: T, storage: &mut S) -> Result<()> + where + >::Output: Linkable, + { + self.detach_links(id, storage)?; + + match self.tail { + Some(old_tail) => { + let (new_node, old_tail_node) = storage.get2_mut(id, old_tail); + let (new_node, old_tail_node) = (new_node.ok_or(kerr!(NotFound))?, old_tail_node.unwrap_or_else(|| { + bug!("node linked from list does not exist in storage."); + })); + + new_node.links_mut().next = None; + new_node.links_mut().prev = Some(old_tail); + + old_tail_node.links_mut().next = Some(id); + } + None => { + let new_node = storage.get_mut(id).ok_or(kerr!(NotFound))?; + new_node.links_mut().next = None; + new_node.links_mut().prev = None; + self.head = Some(id); + } + } + + self.tail = Some(id); + self.len += 1; + Ok(()) + } + + pub fn pop_front + GetMut>(&mut self, storage: &mut S) -> Result> + where + >::Output: Linkable, + { + let Some(id) = self.head else { + return Ok(None); + }; + + self.remove(id, storage)?; + Ok(Some(id)) + } + + pub fn pop_back + GetMut>(&mut self, storage: &mut S) -> Result> + where + >::Output: Linkable, + { + let Some(id) = self.tail else { + return Ok(None); + }; + + self.remove(id, storage)?; + Ok(Some(id)) + } + + /// Removes `id` from the list. Errors if `id` does not exist in `storage` or if the node corresponding to `id` is not linked. 
+ pub fn remove + GetMut>(&mut self, id: T, storage: &mut S) -> Result<()> + where + >::Output: Linkable, + { + let (prev, next, linked) = { + let node = storage.get(id).ok_or(kerr!(NotFound))?; + let links = node.links(); + let linked = self.head == Some(id) + || self.tail == Some(id) + || links.prev.is_some() + || links.next.is_some(); + (links.prev, links.next, linked) + }; + + if !linked { + return Err(kerr!(NotFound)); + } + + if let Some(prev_id) = prev { + let prev_node = storage.get_mut(prev_id).unwrap_or_else(|| { + bug!("node linked from list does not exist in storage."); + }); + prev_node.links_mut().next = next; + } else { + self.head = next; + } + + if let Some(next_id) = next { + let next_node = storage.get_mut(next_id).unwrap_or_else(|| { + bug!("node linked from list does not exist in storage."); + }); + next_node.links_mut().prev = prev; + } else { + self.tail = prev; + } + + let node = storage.get_mut(id).ok_or(kerr!(NotFound))?; + node.links_mut().prev = None; + node.links_mut().next = None; + + self.len = self.len.saturating_sub(1); + Ok(()) + } + + /// Detaches `id` from any list it is currently in. If `id` is not in any list but is linked, the links are cleared. 
+ fn detach_links + GetMut>(&mut self, id: T, storage: &mut S) -> Result<()> + where + >::Output: Linkable, + { + let linked = { + let node = storage.get(id).ok_or(kerr!(NotFound))?; + let links = node.links(); + self.head == Some(id) + || self.tail == Some(id) + || links.prev.is_some() + || links.next.is_some() + }; + + if linked { + self.remove(id, storage)?; + } else { + let node = storage.get_mut(id).ok_or(kerr!(NotFound))?; + node.links_mut().prev = None; + node.links_mut().next = None; + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use core::borrow::Borrow; + + use super::{Linkable, Links, List}; + use crate::types::{array::IndexMap, traits::{Get, ToIndex}}; + + #[proc_macros::fmt] + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] + struct Id(usize); + + impl ToIndex for Id { + fn to_index>(idx: Option) -> usize { + idx.as_ref().map_or(0, |k| k.borrow().0) + } + } + + #[derive(Clone, Copy)] + struct TestTag; + + struct Node { + links: Links, + } + + impl Node { + fn new() -> Self { + Self { + links: Links::new(), + } + } + } + + impl Linkable for Node { + fn links(&self) -> &Links { + &self.links + } + + fn links_mut(&mut self) -> &mut Links { + &mut self.links + } + } + + fn storage() -> IndexMap { + let mut map = IndexMap::new(); + for i in 0..4 { + assert!(map.insert(&Id(i), Node::new()).is_ok()); + } + map + } + + #[test] + fn push_front_and_remove() { + let mut s = storage(); + let mut list = List::::new(); + + list.push_front(Id(1), &mut s).unwrap(); + list.push_front(Id(2), &mut s).unwrap(); + list.push_front(Id(3), &mut s).unwrap(); + + assert_eq!(list.head(), Some(Id(3))); + assert_eq!(list.tail(), Some(Id(1))); + assert_eq!(list.len(), 3); + + list.remove(Id(2), &mut s).unwrap(); + assert_eq!(list.head(), Some(Id(3))); + assert_eq!(list.tail(), Some(Id(1))); + assert_eq!(list.len(), 2); + + let n3 = s.get(Id(3)).unwrap(); + let n1 = s.get(Id(1)).unwrap(); + assert_eq!(n3.links().next, Some(Id(1))); + assert_eq!(n1.links().prev, 
Some(Id(3))); + } + + #[test] + fn push_back_and_remove() { + let mut s = storage(); + let mut list = List::::new(); + + list.push_back(Id(1), &mut s).unwrap(); + list.remove(Id(1), &mut s); + + assert_eq!(list.head(), None); + assert_eq!(list.tail(), None); + assert_eq!(list.len(), 0); + } + + #[test] + fn push_back_same_id_reinserts() { + let mut s = storage(); + let mut list = List::::new(); + + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + + assert_eq!(list.head(), Some(Id(1))); + assert_eq!(list.tail(), Some(Id(1))); + assert_eq!(list.len(), 1); + + let n1 = s.get(Id(1)).unwrap(); + assert_eq!(n1.links().prev, None); + assert_eq!(n1.links().next, None); + } + + #[test] + fn pop_back_ordered() { + let mut s = storage(); + let mut list = List::::new(); + + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(2), &mut s).unwrap(); + list.push_back(Id(3), &mut s).unwrap(); + + assert_eq!(list.pop_back(&mut s).unwrap(), Some(Id(3))); + assert_eq!(list.pop_back(&mut s).unwrap(), Some(Id(2))); + assert_eq!(list.pop_back(&mut s).unwrap(), Some(Id(1))); + assert_eq!(list.pop_back(&mut s).unwrap(), None); + assert!(list.is_empty()); + } + + #[test] + fn pop_front_ordered() { + let mut s = storage(); + let mut list = List::::new(); + + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(2), &mut s).unwrap(); + list.push_back(Id(3), &mut s).unwrap(); + + assert_eq!(list.pop_front(&mut s).unwrap(), Some(Id(1))); + assert_eq!(list.pop_front(&mut s).unwrap(), Some(Id(2))); + assert_eq!(list.pop_front(&mut s).unwrap(), Some(Id(3))); + assert_eq!(list.pop_front(&mut s).unwrap(), None); + assert!(list.is_empty()); + } +} + +#[cfg(kani)] +mod verification { + use core::borrow::Borrow; + + use super::{Linkable, Links, List}; + use crate::types::{array::IndexMap, traits::{Get, ToIndex}}; + + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] + struct Id(usize); + + impl ToIndex for Id { + fn to_index>(idx: Option) -> usize { 
+ idx.as_ref().map_or(0, |k| k.borrow().0) + } + } + + #[derive(Clone, Copy)] + struct Tag; + + struct Node { + links: Links, + } + + impl Node { + fn new() -> Self { + Self { links: Links::new() } + } + } + + impl Linkable for Node { + fn links(&self) -> &Links { &self.links } + fn links_mut(&mut self) -> &mut Links { &mut self.links } + } + + fn make_storage() -> IndexMap { + let mut map = IndexMap::new(); + map.insert(&Id(0), Node::new()).unwrap(); + map.insert(&Id(1), Node::new()).unwrap(); + map.insert(&Id(2), Node::new()).unwrap(); + map.insert(&Id(3), Node::new()).unwrap(); + map + } + + /// Verifies the bug! in push_front (old_head not in storage) is unreachable + /// through correct API usage: all IDs we push exist in storage. + #[kani::proof] + fn verify_push_front_bug_unreachable() { + let mut s = make_storage(); + let mut list = List::::new(); + + list.push_front(Id(0), &mut s).unwrap(); + list.push_front(Id(1), &mut s).unwrap(); + list.push_front(Id(2), &mut s).unwrap(); + + assert_eq!(list.len(), 3); + assert_eq!(list.head(), Some(Id(2))); + assert_eq!(list.tail(), Some(Id(0))); + } + + /// Verifies the bug! in push_back (old_tail not in storage) is unreachable + /// through correct API usage. + #[kani::proof] + fn verify_push_back_bug_unreachable() { + let mut s = make_storage(); + let mut list = List::::new(); + + list.push_back(Id(0), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(2), &mut s).unwrap(); + + assert_eq!(list.len(), 3); + assert_eq!(list.head(), Some(Id(0))); + assert_eq!(list.tail(), Some(Id(2))); + } + + /// Verifies the bug! calls in remove (prev/next not in storage) are unreachable + /// when removing the middle element of a 3-item list. 
+ #[kani::proof] + fn verify_remove_middle_bug_unreachable() { + let mut s = make_storage(); + let mut list = List::::new(); + + list.push_back(Id(0), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(2), &mut s).unwrap(); + + list.remove(Id(1), &mut s).unwrap(); + + assert_eq!(list.len(), 2); + assert_eq!(list.head(), Some(Id(0))); + assert_eq!(list.tail(), Some(Id(2))); + assert_eq!(s.get(Id(0)).unwrap().links().next, Some(Id(2))); + assert_eq!(s.get(Id(2)).unwrap().links().prev, Some(Id(0))); + } + + /// Verifies pop_front on empty list returns Ok(None) without panic. + #[kani::proof] + fn verify_pop_empty_no_panic() { + let mut s = make_storage(); + let mut list = List::::new(); + let result = list.pop_front(&mut s); + assert!(result.is_ok()); + assert!(result.unwrap().is_none()); + } + + /// Verifies length invariant: push N distinct items, len == N. + /// Uses symbolic ID ordering so kani explores all 3! = 6 permutations. + #[kani::proof] + #[kani::unwind(4)] + fn verify_len_invariant_push_three() { + let mut s = make_storage(); + let mut list = List::::new(); + + let a: usize = kani::any(); + let b: usize = kani::any(); + let c: usize = kani::any(); + kani::assume(a < 4 && b < 4 && c < 4); + kani::assume(a != b && b != c && a != c); + + list.push_back(Id(a), &mut s).unwrap(); + list.push_back(Id(b), &mut s).unwrap(); + list.push_back(Id(c), &mut s).unwrap(); + + assert_eq!(list.len(), 3); + assert_eq!(list.head(), Some(Id(a))); + assert_eq!(list.tail(), Some(Id(c))); + } + + /// Verifies that reinserting an already-present item does not change len. + #[kani::proof] + fn verify_reinsert_preserves_len() { + let mut s = make_storage(); + let mut list = List::::new(); + + list.push_back(Id(0), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + assert_eq!(list.len(), 2); + + list.push_back(Id(1), &mut s).unwrap(); + assert_eq!(list.len(), 2); + } + + /// Verifies full push/pop cycle leaves list empty. 
+ #[kani::proof] + fn verify_push_pop_cycle_empty() { + let mut s = make_storage(); + let mut list = List::::new(); + + list.push_back(Id(0), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(2), &mut s).unwrap(); + + list.pop_front(&mut s).unwrap(); + list.pop_front(&mut s).unwrap(); + list.pop_front(&mut s).unwrap(); + + assert!(list.is_empty()); + assert_eq!(list.len(), 0); + assert!(list.head().is_none()); + assert!(list.tail().is_none()); + } + + // ----------------------------------------------------------------------- + // FIFO ordering and len ≤ N proofs (Task #12) + // ----------------------------------------------------------------------- + + /// Verify FIFO ordering: push_back(a, b, c) then pop_front yields a, b, c in order. + /// Uses symbolic distinct IDs so Kani explores all 24 permutations (4P3 = 24). + #[kani::proof] + #[kani::unwind(8)] + fn verify_fifo_ordering_symbolic() { + let mut s = make_storage(); + let mut list = List::::new(); + + let a: usize = kani::any(); + let b: usize = kani::any(); + let c: usize = kani::any(); + kani::assume(a < 4 && b < 4 && c < 4); + kani::assume(a != b && b != c && a != c); + + list.push_back(Id(a), &mut s).unwrap(); + list.push_back(Id(b), &mut s).unwrap(); + list.push_back(Id(c), &mut s).unwrap(); + + // FIFO: pop order must match push order + assert_eq!(list.pop_front(&mut s).unwrap(), Some(Id(a))); + assert_eq!(list.pop_front(&mut s).unwrap(), Some(Id(b))); + assert_eq!(list.pop_front(&mut s).unwrap(), Some(Id(c))); + assert_eq!(list.pop_front(&mut s).unwrap(), None); + assert!(list.is_empty()); + } + + /// Verify len ≤ N: after any sequence of pushes with N distinct nodes in storage, + /// len never exceeds N. + /// With 4-slot storage, push all 4 distinct IDs → len == 4. + /// Re-inserting an existing ID is an in-place move, not an increase. 
+ #[kani::proof] + #[kani::unwind(6)] + fn verify_len_never_exceeds_capacity() { + let mut s = make_storage(); // 4 slots + let mut list = List::::new(); + + // Fill the list. + list.push_back(Id(0), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(2), &mut s).unwrap(); + list.push_back(Id(3), &mut s).unwrap(); + assert_eq!(list.len(), 4); + assert!(list.len() <= 4); + + // Re-inserting an already-present ID must not increase len. + let x: usize = kani::any(); + kani::assume(x < 4); + list.push_back(Id(x), &mut s).unwrap(); + assert_eq!(list.len(), 4); + assert!(list.len() <= 4); + } + + /// Verify that head() is always the first-inserted element and tail() is always + /// the last, for any two symbolic distinct IDs. + #[kani::proof] + #[kani::unwind(5)] + fn verify_head_tail_invariant() { + let mut s = make_storage(); + let mut list = List::::new(); + + let a: usize = kani::any(); + let b: usize = kani::any(); + kani::assume(a < 4 && b < 4 && a != b); + + list.push_back(Id(a), &mut s).unwrap(); + assert_eq!(list.head(), Some(Id(a))); + assert_eq!(list.tail(), Some(Id(a))); + + list.push_back(Id(b), &mut s).unwrap(); + assert_eq!(list.head(), Some(Id(a))); + assert_eq!(list.tail(), Some(Id(b))); + + // After popping the front, tail stays, new head is b. + list.pop_front(&mut s).unwrap(); + assert_eq!(list.head(), Some(Id(b))); + assert_eq!(list.tail(), Some(Id(b))); + } +} diff --git a/src/mem/pool.rs b/src/types/pool.rs similarity index 100% rename from src/mem/pool.rs rename to src/types/pool.rs diff --git a/src/mem/queue.rs b/src/types/queue.rs similarity index 99% rename from src/mem/queue.rs rename to src/types/queue.rs index 4e2d159..fbd0ba0 100644 --- a/src/mem/queue.rs +++ b/src/types/queue.rs @@ -5,7 +5,7 @@ use super::boxed::Box; use crate::utils::KernelError; /// A ring-buffer based queue, with N elements stored inline. TODO: Make this growable. 
-#[derive(Debug)] +#[proc_macros::fmt] pub struct Queue { data: Vec, len: usize, diff --git a/src/types/rbtree.rs b/src/types/rbtree.rs new file mode 100644 index 0000000..9870ae8 --- /dev/null +++ b/src/types/rbtree.rs @@ -0,0 +1,1633 @@ +use core::{marker::PhantomData}; + +use crate::error::Result; + +use super::traits::{Get, GetMut}; + +#[allow(dead_code)] +pub struct RbTree { + root: Option, + min: Option, + _tag: PhantomData, +} + +#[allow(dead_code)] +pub trait Linkable { + fn links(&self) -> &Links; + fn links_mut(&mut self) -> &mut Links; +} + +pub trait Compare { + fn cmp(&self, other: &Self) -> core::cmp::Ordering; +} + +#[allow(dead_code)] +#[proc_macros::fmt] +#[derive(Clone, Copy, PartialEq, Eq)] +pub struct Links { + parent: Option, + left: Option, + right: Option, + color: Color, + _tag: PhantomData, +} + +#[allow(dead_code)] +impl Links { + pub fn new() -> Self { + Self { + parent: None, + left: None, + right: None, + color: Color::Red, + _tag: PhantomData, + } + } +} + +#[proc_macros::fmt] +#[derive(Clone, Copy, PartialEq, Eq)] +enum Color { + Red, + Black, +} + +#[allow(dead_code)] +impl RbTree +{ + pub const fn new() -> Self { + Self { + root: None, + min: None, + _tag: PhantomData, + } + } + + /// Inserts `id` into the tree. If `id` already exists in the tree, it is first removed and then re-inserted. Errors if `id` does not exist in `storage`. 
+ pub fn insert< + S: Get + GetMut>(&mut self, id: T, storage: &mut S) -> Result<()> + where >::Output: Linkable + Compare,{ + let already_linked = { + let node = storage.get(id).ok_or(kerr!(NotFound))?; + let links = node.links(); + self.root == Some(id) + || links.parent.is_some() + || links.left.is_some() + || links.right.is_some() + }; + + if already_linked { + self.remove(id, storage)?; + } + + let mut last = None; + + { + let node = storage.get(id).ok_or(kerr!(NotFound))?; + let mut current = self.root; + + while let Some(current_id) = current { + last = current; + let current_node = storage.get(current_id).unwrap_or_else(|| { + bug!("node linked from tree does not exist in storage."); + }); + let go_left = node.cmp(current_node) == core::cmp::Ordering::Less; + + current = if go_left { + current_node.links().left + } else { + current_node.links().right + }; + } + } + + { + let node = storage.get_mut(id).ok_or(kerr!(NotFound))?.links_mut(); + node.parent = last; + node.left = None; + node.right = None; + node.color = Color::Red; + } + + match last { + None => self.root = Some(id), + Some(last_id) => { + if let (Some(node), Some(last)) = storage.get2_mut(id, last_id) { + if node.cmp(last) == core::cmp::Ordering::Less { + last.links_mut().left = Some(id); + } else { + last.links_mut().right = Some(id); + } + } + } + } + + if let Some(min_id) = self.min { + let node = storage.get(id).ok_or(kerr!(NotFound))?; + let min_node = storage.get(min_id).unwrap_or_else(|| { + bug!("node linked from tree does not exist in storage."); + }); + if node.cmp(min_node) == core::cmp::Ordering::Less { + self.min = Some(id); + } + } else { + self.min = Some(id); + } + + self.insert_fixup(id, storage) + } + + pub fn remove + GetMut>(&mut self, id: T, storage: &mut S) -> Result<()> + where >::Output: Linkable + Compare { + let (node_left, node_right, node_parent, node_is_red, linked) = { + let node = storage.get(id).ok_or(kerr!(NotFound))?; + let links = node.links(); + ( + 
links.left, + links.right, + links.parent, + matches!(links.color, Color::Red), + self.root == Some(id) + || links.parent.is_some() + || links.left.is_some() + || links.right.is_some(), + ) + }; + + if !linked { + return Err(kerr!(NotFound)); + } + + let mut succ_was_red = node_is_red; + let child: Option; + let child_parent: Option; + + if node_left.is_none() { + child = node_right; + child_parent = node_parent; + + self.transplant(id, node_right, storage)?; + } else if node_right.is_none() { + child = node_left; + child_parent = node_parent; + + self.transplant(id, node_left, storage)?; + } else { + let right_id = node_right.unwrap_or_else(|| { + bug!("node's right child is None, but it is not None according to previous get."); + }); + let succ = self.minimum(right_id, storage)?; + let succ_right = storage.get(succ).and_then(|n| n.links().right); + let succ_parent = storage.get(succ).and_then(|n| n.links().parent); + + succ_was_red = storage + .get(succ) + .map_or(false, |n| matches!(n.links().color, Color::Red)); + child = succ_right; + + if succ_parent == Some(id) { + child_parent = Some(succ); + } else { + self.transplant(succ, succ_right, storage)?; + + if let (Some(succ_node), Some(right_node)) = storage.get2_mut(succ, right_id) { + succ_node.links_mut().right = Some(right_id); + right_node.links_mut().parent = Some(succ); + } else { + bug!("node linked from tree does not exist in storage."); + } + + child_parent = succ_parent; + } + + self.transplant(id, Some(succ), storage)?; + + let left_id = node_left.unwrap_or_else(|| { + bug!("node's left child is None, but it is not None according to previous get."); + }); + + if let (Some(succ_node), Some(left_node)) = storage.get2_mut(succ, left_id) { + succ_node.links_mut().left = Some(left_id); + left_node.links_mut().parent = Some(succ); + } else { + bug!("node linked from tree does not exist in storage."); + } + + if let Some(succ_node) = storage.get_mut(succ) { + succ_node.links_mut().color = if node_is_red { + 
Color::Red + } else { + Color::Black + }; + } else { + bug!("node linked from tree does not exist in storage."); + } + } + + if !succ_was_red { + self.delete_fixup(child, child_parent, storage)?; + } + + // Fully detach the removed node so stale links cannot be observed on reinsertion. + if let Some(node) = storage.get_mut(id) { + let links = node.links_mut(); + links.parent = None; + links.left = None; + links.right = None; + links.color = Color::Red; + } else { + bug!("node linked from tree does not exist in storage."); + } + + if self.min == Some(id) { + self.min = match self.root { + Some(root_id) => Some(self.minimum(root_id, storage)?), + None => None, + }; + } + + Ok(()) + } + + pub fn min(&self) -> Option { + self.min + } + + fn insert_fixup + GetMut>(&mut self, mut id: T, storage: &mut S) -> Result<()> + where >::Output: Linkable + Compare, { + while let Some(parent) = storage.get(id).and_then(|n| n.links().parent) + && storage + .get(parent) + .map_or(false, |n| matches!(n.links().color, Color::Red)) + { + let grandparent = storage + .get(parent) + .and_then(|n| n.links().parent) + .unwrap_or_else(|| { + bug!("node linked from tree does not have a parent."); + }); + + // Is left child node + if storage + .get(grandparent) + .map_or(false, |n| n.links().left == Some(parent)) + { + // Uncle node must be the right child node + let uncle = storage.get(grandparent).and_then(|n| n.links().right); + + if let Some(uncle_id) = uncle + && storage + .get(uncle_id) + .map_or(false, |n| matches!(n.links().color, Color::Red)) + { + // Parent and uncle nodes are red + if let (Some(parent_node), Some(uncle_node), Some(grandparent_node)) = + storage.get3_mut(parent, uncle_id, grandparent) + { + parent_node.links_mut().color = Color::Black; + uncle_node.links_mut().color = Color::Black; + grandparent_node.links_mut().color = Color::Red; + } + id = grandparent; + } else { + // Uncle node is black + if storage + .get(parent) + .map_or(false, |n| n.links().right == Some(id)) 
+ { + let old_parent = parent; + self.rotate_left(parent, id, storage)?; + id = old_parent; + } + + let parent = storage.get(id).and_then(|n| n.links().parent).ok_or(kerr!(NotFound))?; + let grandparent = storage + .get(parent) + .and_then(|n| n.links().parent) + .unwrap_or_else(|| { + bug!("node linked from tree does not have a parent."); + }); + + if let (Some(parent_node), Some(grandparent_node)) = + storage.get2_mut(parent, grandparent) + { + parent_node.links_mut().color = Color::Black; + grandparent_node.links_mut().color = Color::Red; + } + self.rotate_right(grandparent, parent, storage)?; + break; + } + } else { + // Uncle node must be the left child + let uncle = storage.get(grandparent).and_then(|n| n.links().left); + + if let Some(uncle_id) = uncle + && storage + .get(uncle_id) + .map_or(false, |n| matches!(n.links().color, Color::Red)) + { + // Parent and uncle nodes are red + if let (Some(parent_node), Some(uncle_node), Some(grandparent_node)) = + storage.get3_mut(parent, uncle_id, grandparent) + { + parent_node.links_mut().color = Color::Black; + uncle_node.links_mut().color = Color::Black; + grandparent_node.links_mut().color = Color::Red; + } + id = grandparent; + } else { + // Uncle node is black + if storage + .get(parent) + .map_or(false, |n| n.links().left == Some(id)) + { + let old_parent = parent; + self.rotate_right(parent, id, storage)?; + id = old_parent; + } + + let parent = storage.get(id).and_then(|n| n.links().parent).ok_or(kerr!(NotFound))?; + let grandparent = storage + .get(parent) + .and_then(|n| n.links().parent) + .unwrap_or_else(|| { + bug!("node linked from tree does not have a parent."); + }); + + if let (Some(parent_node), Some(grandparent_node)) = + storage.get2_mut(parent, grandparent) + { + parent_node.links_mut().color = Color::Black; + grandparent_node.links_mut().color = Color::Red; + } + self.rotate_left(grandparent, parent, storage)?; + break; + } + } + } + + if let Some(root_id) = self.root { + if let Some(root_node) 
= storage.get_mut(root_id) { + root_node.links_mut().color = Color::Black; + } + } + + Ok(()) + } + + fn delete_fixup + GetMut>( + &mut self, + mut id: Option, + mut parent: Option, + storage: &mut S, + ) -> Result<()> + where >::Output: Linkable + Compare, { + let is_red = |node_id: Option, storage: &S| -> bool { + node_id + .and_then(|id| storage.get(id)) + .map_or(false, |n| matches!(n.links().color, Color::Red)) + }; + + let is_black = |node_id: Option, storage: &S| -> bool { !is_red(node_id, storage) }; + + while id != self.root && is_black(id, storage) { + let parent_id = parent.unwrap_or_else(|| { + bug!("node linked from tree does not have a parent."); + }); + + let is_left_child = storage + .get(parent_id) + .map_or(false, |n| n.links().left == id); + + if is_left_child { + let mut sibling_opt = storage.get(parent_id).and_then(|n| n.links().right); + + if is_red(sibling_opt, storage) { + let sibling_id = sibling_opt.unwrap_or_else(|| { + bug!("node linked from tree does not exist in storage."); + }); + // Color sibling node black and parent node red, rotate + if let (Some(sib), Some(par)) = storage.get2_mut(sibling_id, parent_id) { + sib.links_mut().color = Color::Black; + par.links_mut().color = Color::Red; + } else { + return Err(kerr!(NotFound)); + } + self.rotate_left(parent_id, sibling_id, storage)?; + sibling_opt = storage.get(parent_id).and_then(|n| n.links().right); + } + + // Sibling node is black + let sibling_id = sibling_opt.unwrap_or_else(|| { + bug!("node linked from tree does not exist in storage."); + }); + let sib_left = storage.get(sibling_id).and_then(|n| n.links().left); + let sib_right = storage.get(sibling_id).and_then(|n| n.links().right); + + if is_black(sib_left, storage) && is_black(sib_right, storage) { + // Color sibling node red and move up + if let Some(sib) = storage.get_mut(sibling_id) { + sib.links_mut().color = Color::Red; + } else { + bug!("node linked from tree does not exist in storage."); + } + id = Some(parent_id); + 
parent = storage.get(parent_id).and_then(|n| n.links().parent); + } else { + // Sibling's left node is red + if is_black(sib_right, storage) { + let sib_left_id = sib_left.unwrap_or_else(|| { + bug!("node linked from tree does not exist in storage."); + }); + if let (Some(sib), Some(left)) = storage.get2_mut(sibling_id, sib_left_id) { + sib.links_mut().color = Color::Red; + left.links_mut().color = Color::Black; + } else { + bug!("node linked from tree does not exist in storage."); + } + self.rotate_right(sibling_id, sib_left_id, storage)?; + sibling_opt = storage.get(parent_id).and_then(|n| n.links().right); + } + + // Sibling's right child node is red + let sibling_id = sibling_opt.unwrap_or_else(|| { + bug!("node linked from tree does not exist in storage."); + }); + let parent_is_red = storage + .get(parent_id) + .map_or(false, |n| matches!(n.links().color, Color::Red)); + + if let Some(sib) = storage.get_mut(sibling_id) { + sib.links_mut().color = if parent_is_red { + Color::Red + } else { + Color::Black + }; + } + if let Some(par) = storage.get_mut(parent_id) { + par.links_mut().color = Color::Black; + } + + let sib_right = storage.get(sibling_id).and_then(|n| n.links().right); + if let Some(sib_right_id) = sib_right { + if let Some(right) = storage.get_mut(sib_right_id) { + right.links_mut().color = Color::Black; + } + } + + self.rotate_left(parent_id, sibling_id, storage)?; + id = self.root; + break; + } + } else { + let mut sibling_opt = storage.get(parent_id).and_then(|n| n.links().left); + + if is_red(sibling_opt, storage) { + let sibling_id = sibling_opt.unwrap_or_else(|| { + bug!("node linked from tree does not exist in storage."); + }); + if let (Some(sib), Some(par)) = storage.get2_mut(sibling_id, parent_id) { + sib.links_mut().color = Color::Black; + par.links_mut().color = Color::Red; + } else { + bug!("node linked from tree does not exist in storage."); + } + self.rotate_right(parent_id, sibling_id, storage)?; + sibling_opt = 
storage.get(parent_id).and_then(|n| n.links().left); + } + + // Sibling node is black + let sibling_id = sibling_opt.unwrap_or_else(|| { + bug!("node linked from tree does not exist in storage."); + }); + let sib_left = storage.get(sibling_id).and_then(|n| n.links().left); + let sib_right = storage.get(sibling_id).and_then(|n| n.links().right); + + if is_black(sib_left, storage) && is_black(sib_right, storage) { + if let Some(sib) = storage.get_mut(sibling_id) { + sib.links_mut().color = Color::Red; + } else { + bug!("node linked from tree does not exist in storage."); + } + id = Some(parent_id); + parent = storage.get(parent_id).and_then(|n| n.links().parent); + } else { + // Sibling's right node is red + if is_black(sib_left, storage) { + let sib_right_id = sib_right.unwrap_or_else(|| { + bug!("node linked from tree does not exist in storage."); + }); + if let (Some(sib), Some(right)) = storage.get2_mut(sibling_id, sib_right_id) + { + sib.links_mut().color = Color::Red; + right.links_mut().color = Color::Black; + } else { + bug!("node linked from tree does not exist in storage."); + } + self.rotate_left(sibling_id, sib_right_id, storage)?; + sibling_opt = storage.get(parent_id).and_then(|n| n.links().left); + } + + // Sibling's left child node is red + let sibling_id = sibling_opt.unwrap_or_else(|| { + bug!("node linked from tree does not exist in storage."); + }); + let parent_is_red = storage + .get(parent_id) + .map_or(false, |n| matches!(n.links().color, Color::Red)); + + if let Some(sib) = storage.get_mut(sibling_id) { + sib.links_mut().color = if parent_is_red { + Color::Red + } else { + Color::Black + }; + } + if let Some(par) = storage.get_mut(parent_id) { + par.links_mut().color = Color::Black; + } + + let sib_left = storage.get(sibling_id).and_then(|n| n.links().left); + if let Some(sib_left_id) = sib_left { + if let Some(left) = storage.get_mut(sib_left_id) { + left.links_mut().color = Color::Black; + } + } + + self.rotate_right(parent_id, sibling_id, 
storage)?; + id = self.root; + break; + } + } + } + + // Color the root node black + if let Some(id) = id { + if let Some(node) = storage.get_mut(id) { + node.links_mut().color = Color::Black; + } + } + + Ok(()) + } + + fn minimum>(&self, mut id: T, storage: &S) -> Result + where >::Output: Linkable + Compare, { + loop { + let left = storage.get(id).ok_or(kerr!(NotFound))?.links().left; + match left { + Some(left_id) => id = left_id, + None => return Ok(id), + } + } + } + + fn transplant + GetMut>(&mut self, u: T, v: Option, storage: &mut S) -> Result<()> + where >::Output: Linkable + Compare, { + let u_parent = storage.get(u).and_then(|n| n.links().parent); + + match u_parent { + None => self.root = v, + Some(parent_id) => { + if let Some(parent_node) = storage.get_mut(parent_id) { + if parent_node.links().left == Some(u) { + parent_node.links_mut().left = v; + } else { + parent_node.links_mut().right = v; + } + } else { + bug!("node linked from tree does not exist in storage."); + } + } + } + + if let Some(v_id) = v { + if let Some(v_node) = storage.get_mut(v_id) { + v_node.links_mut().parent = u_parent; + } else { + bug!("node linked from tree does not exist in storage."); + } + } + + Ok(()) + } + + fn rotate_right + GetMut>(&mut self, pivot: T, left: T, storage: &mut S) -> Result<()> + where >::Output: Linkable + Compare, { + if pivot == left { + return Err(kerr!(NotFound)); + } + + let (right, parent) = + if let (Some(pivot_node), Some(left_node)) = storage.get2_mut(pivot, left) { + // Add left child's right subtree as pivot's left subtree + pivot_node.links_mut().left = left_node.links().right; + + // Add pivot's parent as left child's parent + left_node.links_mut().parent = pivot_node.links().parent; + + let old_right = left_node.links().right; + + // Set pivot as the right child of left child + left_node.links_mut().right = Some(pivot); + + let old_parent = pivot_node.links().parent; + + // Set pivot's parent to left child + pivot_node.links_mut().parent = 
Some(left); + + (old_right, old_parent) + } else { + bug!("node linked from tree does not exist in storage."); + }; + + if let Some(right_id) = right { + if let Some(right_node) = storage.get_mut(right_id) { + right_node.links_mut().parent = Some(pivot); + } + } + + match parent { + None => self.root = Some(left), + Some(parent_id) => { + if let Some(parent_node) = storage.get_mut(parent_id) { + if parent_node.links().left == Some(pivot) { + parent_node.links_mut().left = Some(left); + } else { + parent_node.links_mut().right = Some(left); + } + } else { + bug!("node linked from tree does not exist in storage."); + } + } + } + + Ok(()) + } + + fn rotate_left + GetMut>(&mut self, pivot: T, right: T, storage: &mut S) -> Result<()> + where >::Output: Linkable + Compare, { + if pivot == right { + return Err(kerr!(NotFound)); + } + + let (left, parent) = + if let (Some(pivot_node), Some(right_node)) = storage.get2_mut(pivot, right) { + // Add right child's left subtree as pivot's right subtree + pivot_node.links_mut().right = right_node.links().left; + + // Add pivot's parent as right child's parent + right_node.links_mut().parent = pivot_node.links().parent; + + let old_left = right_node.links().left; + + // Set pivot as the left child of right child + right_node.links_mut().left = Some(pivot); + + let old_parent = pivot_node.links().parent; + + // Set pivot's parent to right child + pivot_node.links_mut().parent = Some(right); + + (old_left, old_parent) + } else { + bug!("node linked from tree does not exist in storage."); + }; + + if let Some(left_id) = left { + if let Some(left_node) = storage.get_mut(left_id) { + left_node.links_mut().parent = Some(pivot); + } + } + + match parent { + None => self.root = Some(right), + Some(parent_id) => { + if let Some(parent_node) = storage.get_mut(parent_id) { + if parent_node.links().left == Some(pivot) { + parent_node.links_mut().left = Some(right); + } else { + parent_node.links_mut().right = Some(right); + } + } else { + 
bug!("node linked from tree does not exist in storage."); + } + } + } + Ok(()) + } +} + +// TESTING ------------------------------------------------------------------------------------------------------------ + +#[cfg(any(test, kani))] +mod test_common { + use super::{Compare, Linkable, Links}; + + pub(super) struct Tree; + + pub(super) struct Node { + pub(super) key: i32, + pub(super) links: Links, + } + + impl Node { + pub(super) fn new(key: i32) -> Self { + Self { + key, + links: Links::new(), + } + } + } + + impl Compare for Node { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.key.cmp(&other.key) + } + } + + impl Linkable for Node { + fn links(&self) -> &Links { + &self.links + } + + fn links_mut(&mut self) -> &mut Links { + &mut self.links + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use super::{Get, GetMut}; + use super::test_common::{Tree, Node}; + use std::borrow::Borrow; + use std::collections::HashSet; + + struct NodeStore { + nodes: Vec, + } + + impl NodeStore { + fn new(keys: &[i32]) -> Self { + Self { + nodes: keys.iter().copied().map(Node::new).collect(), + } + } + } + + impl Get for NodeStore { + type Output = Node; + + fn get>(&self, index: K) -> Option<&Self::Output> { + self.nodes.get(*index.borrow()) + } + } + + impl GetMut for NodeStore { + fn get_mut>(&mut self, index: K) -> Option<&mut Self::Output> { + self.nodes.get_mut(*index.borrow()) + } + + fn get2_mut>( + &mut self, + index1: K, + index2: K, + ) -> (Option<&mut Self::Output>, Option<&mut Self::Output>) { + if *index1.borrow() == *index2.borrow() { + return (None, None); + } + + let ptr = self.nodes.as_ptr(); + + return unsafe { + ( + Some(&mut *(ptr.add(*index1.borrow()) as *mut Self::Output)), + Some(&mut *(ptr.add(*index2.borrow()) as *mut Self::Output)), + ) + }; + } + + fn get3_mut>( + &mut self, + index1: K, + index2: K, + index3: K, + ) -> ( + Option<&mut Self::Output>, + Option<&mut Self::Output>, + Option<&mut Self::Output>, + ) { + if 
*index1.borrow() == *index2.borrow() + || *index1.borrow() == *index3.borrow() + || *index2.borrow() == *index3.borrow() + { + return (None, None, None); + } + + let ptr = self.nodes.as_ptr(); + return unsafe { + ( + Some(&mut *(ptr.add(*index1.borrow()) as *mut Self::Output)), + Some(&mut *(ptr.add(*index2.borrow()) as *mut Self::Output)), + Some(&mut *(ptr.add(*index3.borrow()) as *mut Self::Output)), + ) + }; + } + } + + fn validate_tree(tree: &RbTree, store: &NodeStore, expected: &[i32]) { + let mut visited = HashSet::new(); + + if let Some(root_id) = tree.root { + let root = store.get(root_id).expect("root missing from store"); + assert!(matches!(root.links().color, Color::Black)); + assert_eq!(root.links().parent, None); + } + + let (count, _) = validate_node(tree.root, store, &mut visited, expected); + assert_eq!(count, expected.len()); + + if !expected.is_empty() { + let min = tree_min_key(tree, store).expect("non-empty tree must contain a min."); + assert_eq!(min, expected[0]); + } + } + + fn tree_min_key(tree: &RbTree, store: &NodeStore) -> Option { + tree.min().map(|id| store.get(id).expect("min missing").key) + } + + fn validate_node( + id: Option, + store: &NodeStore, + visited: &mut HashSet, + expected: &[i32], + ) -> (usize, usize) { + let Some(id) = id else { + return (0, 1); + }; + + assert!(visited.insert(id)); + + let node = store.get(id).expect("node missing from store"); + + let left = node.links().left; + let right = node.links().right; + + if matches!(node.links().color, Color::Red) { + if let Some(left_id) = left { + let left_node = store.get(left_id).expect("left missing"); + assert!(matches!(left_node.links().color, Color::Black)); + } + if let Some(right_id) = right { + let right_node = store.get(right_id).expect("right missing"); + assert!(matches!(right_node.links().color, Color::Black)); + } + } + + if let Some(left_id) = left { + let left_node = store.get(left_id).expect("left missing"); + assert_eq!(left_node.links().parent, 
Some(id)); + } + if let Some(right_id) = right { + let right_node = store.get(right_id).expect("right missing"); + assert_eq!(right_node.links().parent, Some(id)); + } + + let (left_count, left_bh) = validate_node(left, store, visited, &expected); + assert_eq!( + node.key, expected[left_count], + "expected key {}, found {}", + expected[left_count], node.key + ); + let (right_count, right_bh) = + validate_node(right, store, visited, &expected[1 + left_count..]); + + assert_eq!( + left_bh, right_bh, + "black height mismatch at node with key {}", + node.key + ); + + let self_bh = if matches!(node.links().color, Color::Black) { + left_bh + 1 + } else { + left_bh + }; + + (1 + left_count + right_count, self_bh) + } + + fn lcg(seed: &mut u64) -> u64 { + *seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1); + *seed + } + + fn shuffle(ids: &mut [usize]) { + let mut seed = 0x6b8b_4567_9a1c_def0u64; + for i in (1..ids.len()).rev() { + let j = (lcg(&mut seed) % (i as u64 + 1)) as usize; + ids.swap(i, j); + } + } + + #[test] + fn insert_validates() { + let keys: Vec = (0..200).collect(); + let mut store = NodeStore::new(&keys); + let mut tree = RbTree::new(); + let mut order: Vec = (0..keys.len()).collect(); + + shuffle(&mut order); + for id in order { + tree.insert(id, &mut store).unwrap(); + } + + validate_tree(&tree, &store, &keys); + } + + #[test] + fn reinsert_same_id_is_stable() { + let keys = vec![10, 5, 15]; + let mut store = NodeStore::new(&keys); + let mut tree = RbTree::new(); + + tree.insert(0, &mut store).unwrap(); + tree.insert(1, &mut store).unwrap(); + tree.insert(2, &mut store).unwrap(); + + // Reinsert existing node id. This should not create duplicate structural links. 
+ tree.insert(1, &mut store).unwrap(); + + let mut expected = keys.clone(); + expected.sort(); + validate_tree(&tree, &store, &expected); + } + + #[test] + fn min_updates_on_insert_and_remove() { + let keys = vec![10, 5, 15, 3, 7, 12, 18, 1, 6]; + let mut store = NodeStore::new(&keys); + let mut tree = RbTree::new(); + + for id in 0..keys.len() { + tree.insert(id, &mut store).unwrap(); + } + + let mut sorted_keys = keys.clone(); + sorted_keys.sort(); + + validate_tree(&tree, &store, &sorted_keys); + assert_eq!(tree_min_key(&tree, &store), Some(1)); + + // Remove index 7 (key=1) + tree.remove(7, &mut store).unwrap(); + sorted_keys.retain(|&x| x != 1); + validate_tree(&tree, &store, &sorted_keys); + assert_eq!(tree_min_key(&tree, &store), Some(3)); + + // Remove index 8 (key=6) + tree.remove(8, &mut store).unwrap(); + sorted_keys.retain(|&x| x != 6); + validate_tree(&tree, &store, &sorted_keys); + assert_eq!(tree_min_key(&tree, &store), Some(3)); + + // Remove index 3 (key=3) + tree.remove(3, &mut store).unwrap(); + sorted_keys.retain(|&x| x != 3); + validate_tree(&tree, &store, &sorted_keys); + assert_eq!(tree_min_key(&tree, &store), Some(5)); + } + + #[test] + fn remove_leaf_one_child_two_children() { + let keys = vec![10, 5, 15, 3, 7, 12, 18, 1, 6]; + let mut store = NodeStore::new(&keys); + let mut tree = RbTree::new(); + + for id in 0..keys.len() { + tree.insert(id, &mut store).unwrap(); + } + + let mut sorted_keys = keys.clone(); + sorted_keys.sort(); + validate_tree(&tree, &store, &sorted_keys); + + // Remove node at index 4 (key=7) + tree.remove(4, &mut store).unwrap(); + sorted_keys.retain(|&x| x != 7); + validate_tree(&tree, &store, &sorted_keys); + + // Remove node at index 3 (key=3) + tree.remove(3, &mut store).unwrap(); + sorted_keys.retain(|&x| x != 3); + validate_tree(&tree, &store, &sorted_keys); + + // Remove node at index 7 (key=1) + tree.remove(7, &mut store).unwrap(); + sorted_keys.retain(|&x| x != 1); + validate_tree(&tree, &store, &sorted_keys); 
+ } + + #[test] + fn remove_root_with_two_children() { + let keys = [8, 4, 12, 2, 6, 10, 14, 1, 3, 5, 7, 9, 11, 13, 15]; + let mut store = NodeStore::new(&keys); + let mut tree = RbTree::new(); + + for id in 0..keys.len() { + tree.insert(id, &mut store).unwrap(); + } + + let mut sorted_keys: Vec = keys.to_vec(); + sorted_keys.sort(); + validate_tree(&tree, &store, &sorted_keys); + + let root_id = tree.root.expect("root missing"); + let root_key = store.get(root_id).expect("root missing").key; + + tree.remove(root_id, &mut store).unwrap(); + sorted_keys.retain(|&x| x != root_key); + validate_tree(&tree, &store, &sorted_keys); + } + + #[test] + fn remove_all_nodes() { + let keys: Vec = (0..128).collect(); + let mut store = NodeStore::new(&keys); + let mut tree = RbTree::new(); + let mut order: Vec = (0..keys.len()).collect(); + shuffle(&mut order); + + for id in &order { + tree.insert(*id, &mut store).unwrap(); + } + + let mut remaining_keys = keys.clone(); + validate_tree(&tree, &store, &remaining_keys); + + for id in order { + let removed_key = keys[id]; + tree.remove(id, &mut store).unwrap(); + remaining_keys.retain(|&k| k != removed_key); + validate_tree(&tree, &store, &remaining_keys); + } + + assert_eq!(tree.root, None); + } + + #[test] + fn interleaved_operations() { + let keys: Vec = (0..100).collect(); + let mut store = NodeStore::new(&keys); + let mut tree = RbTree::new(); + let mut order: Vec = (0..keys.len()).collect(); + shuffle(&mut order); + + // Build initial tree with 50 nodes + let mut active_keys: Vec = Vec::new(); + for id in order.iter().take(50) { + tree.insert(*id, &mut store).unwrap(); + active_keys.push(keys[*id]); + } + active_keys.sort(); + validate_tree(&tree, &store, &active_keys); + + // Alternate: remove oldest, insert new + for i in 0..50 { + let removed_key = keys[order[i]]; + tree.remove(order[i], &mut store).unwrap(); + active_keys.retain(|&k| k != removed_key); + validate_tree(&tree, &store, &active_keys); + + tree.insert(order[50 
+ i], &mut store).unwrap(); + active_keys.push(keys[order[50 + i]]); + active_keys.sort(); + validate_tree(&tree, &store, &active_keys); + } + } + + #[test] + fn stress_test() { + let keys: Vec = (0..500).collect(); + let mut store = NodeStore::new(&keys); + let mut tree = RbTree::new(); + let mut order: Vec = (0..keys.len()).collect(); + shuffle(&mut order); + + let mut seed = 0x6b8b_4567_9a1c_def0u64; + let mut active_nodes = Vec::new(); + let mut available_nodes = order.clone(); + + for _ in 0..10000 { + let do_insert = if active_nodes.is_empty() { + true + } else if available_nodes.is_empty() { + false + } else { + (lcg(&mut seed) % 10) < 7 + }; + + if do_insert { + let idx = (lcg(&mut seed) as usize) % available_nodes.len(); + let node_id = available_nodes.swap_remove(idx); + tree.insert(node_id, &mut store).unwrap(); + active_nodes.push(node_id); + } else { + let idx = (lcg(&mut seed) as usize) % active_nodes.len(); + let node_id = active_nodes.swap_remove(idx); + tree.remove(node_id, &mut store).unwrap(); + available_nodes.push(node_id); + } + + let mut expected_keys: Vec = active_nodes.iter().map(|&id| keys[id]).collect(); + expected_keys.sort(); + validate_tree(&tree, &store, &expected_keys); + } + + let mut expected_keys: Vec = active_nodes.iter().map(|&id| keys[id]).collect(); + expected_keys.sort(); + validate_tree(&tree, &store, &expected_keys); + } +} + +// END TESTING + +#[cfg(kani)] +mod verification { + use core::borrow::Borrow; + + use super::{Color, Compare, Linkable, Links, RbTree}; + use super::test_common::{Tree, Node}; + use crate::types::{array::IndexMap, traits::{Get, GetMut, ToIndex}}; + + + /// Assert the three RB colour invariants hold on every reachable node: + /// 1. Root is black. + /// 2. No red node has a red child. + /// 3. Every root-to-null path passes through the same number of black nodes. + /// + /// Also asserts child→parent back-pointers are consistent. 
+ fn assert_rb_invariants( + root: Option, + storage: &IndexMap, + ) { + // 1. Root colour. + if let Some(rid) = root { + assert!( + matches!(storage.get(rid).unwrap().links().color, Color::Black), + "root must be black" + ); + } + + const SENTINEL: usize = usize::MAX; + // Stack entry: (raw_id [SENTINEL = null], accumulated_black_count) + let mut stack: [(usize, u32); 16] = [(SENTINEL, 0); 16]; + let mut sp = 0usize; + + // Push the root. + stack[sp] = (root.unwrap_or(SENTINEL), 0); + sp += 1; + + let mut expected_bh: i64 = -1; // not yet observed + + while sp > 0 { + sp -= 1; + let (raw_id, bh) = stack[sp]; + + if raw_id == SENTINEL { + // Null leaf: counts as one extra black level. + let path_bh = bh as i64 + 1; + if expected_bh < 0 { + expected_bh = path_bh; + } else { + assert_eq!(path_bh, expected_bh, "black-height invariant violated"); + } + continue; + } + + let node = storage.get(raw_id).unwrap(); + let links = node.links(); + let is_red = matches!(links.color, Color::Red); + let new_bh = if is_red { bh } else { bh + 1 }; + + // 2. No adjacent red nodes. + if is_red { + if let Some(l) = links.left { + assert!( + !matches!(storage.get(l).unwrap().links().color, Color::Red), + "red node has red left child" + ); + } + if let Some(r) = links.right { + assert!( + !matches!(storage.get(r).unwrap().links().color, Color::Red), + "red node has red right child" + ); + } + } + + // Parent-pointer consistency. + if let Some(l) = links.left { + assert_eq!( + storage.get(l).unwrap().links().parent, + Some(raw_id), + "left child has wrong parent pointer" + ); + } + if let Some(r) = links.right { + assert_eq!( + storage.get(r).unwrap().links().parent, + Some(raw_id), + "right child has wrong parent pointer" + ); + } + + // Push children (right first so left is processed first). 
+ assert!(sp + 2 <= 16, "DFS stack overflow — tree deeper than expected"); + stack[sp] = (links.right.unwrap_or(SENTINEL), new_bh); + sp += 1; + stack[sp] = (links.left.unwrap_or(SENTINEL), new_bh); + sp += 1; + } + } + + /// Assert the BST ordering property: an iterative in-order traversal + /// visits keys in strictly ascending order. + fn assert_bst_order( + root: Option, + storage: &IndexMap, + ) { + // Iterative in-order: push nodes going left, pop and visit, then go right. + let mut stack: [usize; 16] = [0; 16]; + let mut sp = 0usize; + let mut cur = root; + let mut prev_key: Option = None; + + loop { + // Descend left. + while let Some(id) = cur { + assert!(sp < 16, "in-order stack overflow"); + stack[sp] = id; + sp += 1; + cur = storage.get(id).unwrap().links().left; + } + if sp == 0 { + break; + } + sp -= 1; + let id = stack[sp]; + let node = storage.get(id).unwrap(); + // Strictly increasing. + if let Some(prev) = prev_key { + assert!(node.key > prev, "BST order violated"); + } + prev_key = Some(node.key); + cur = node.links().right; + } + } + + /// Verify inserting a single node doesn't panic and min() is correct. + #[kani::proof] + fn verify_insert_single_no_bug() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(42)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + + assert_eq!(tree.min(), Some(0)); + } + + /// Verify inserting two nodes (both orderings via symbolic keys) doesn't panic, + /// and min() returns the node with the smaller key. 
+ #[kani::proof] + #[kani::unwind(5)] + fn verify_insert_two_min_correct() { + let mut s: IndexMap = IndexMap::new(); + let key_a: i32 = kani::any(); + let key_b: i32 = kani::any(); + kani::assume(key_a != key_b); + + s.insert(&0, Node::new(key_a)).unwrap(); + s.insert(&1, Node::new(key_b)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + + let min_id = tree.min().unwrap(); + let min_key = s.get(min_id).unwrap().key; + assert!(min_key <= key_a); + assert!(min_key <= key_b); + } + + /// Verify insert of three concrete nodes exercises insert_fixup without bug!. + #[kani::proof] + #[kani::unwind(6)] + fn verify_insert_three_no_bug() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(10)).unwrap(); + s.insert(&1, Node::new(5)).unwrap(); + s.insert(&2, Node::new(15)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + assert_eq!(tree.min(), Some(1)); + } + + /// Verify remove of root from 3-node tree exercises delete_fixup without bug!. + #[kani::proof] + #[kani::unwind(6)] + fn verify_remove_root_no_bug() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(10)).unwrap(); + s.insert(&1, Node::new(5)).unwrap(); + s.insert(&2, Node::new(15)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + tree.remove(0, &mut s).unwrap(); + + // After removing 10 (root), min must still be 5 (id=1) + assert_eq!(tree.min(), Some(1)); + } + + /// Verify removing the current minimum updates min() correctly. 
+ #[kani::proof] + #[kani::unwind(6)] + fn verify_min_updates_after_remove_min() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(10)).unwrap(); + s.insert(&1, Node::new(5)).unwrap(); + s.insert(&2, Node::new(15)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + assert_eq!(tree.min(), Some(1)); + tree.remove(1, &mut s).unwrap(); // remove minimum (key=5) + assert_eq!(tree.min(), Some(0)); // new minimum is key=10 + } + + /// Verify that after removing all nodes the tree is empty. + #[kani::proof] + #[kani::unwind(6)] + fn verify_remove_all_empty() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(10)).unwrap(); + s.insert(&1, Node::new(5)).unwrap(); + s.insert(&2, Node::new(15)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + tree.remove(1, &mut s).unwrap(); + tree.remove(0, &mut s).unwrap(); + tree.remove(2, &mut s).unwrap(); + + assert!(tree.min().is_none()); + } + + /// Verify ascending-order insertion [1,2,3] — stress-tests right-leaning fixup. + #[kani::proof] + #[kani::unwind(6)] + fn verify_insert_ascending_order() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(1)).unwrap(); + s.insert(&1, Node::new(2)).unwrap(); + s.insert(&2, Node::new(3)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + let min_id = tree.min().unwrap(); + assert_eq!(s.get(min_id).unwrap().key, 1); + } + + // ----------------------------------------------------------------------- + // Invariant-checking proofs + // ----------------------------------------------------------------------- + + /// After inserting 3 concrete nodes the RB colour invariants and BST order hold. 
+ /// Baseline check that the helpers themselves are correct on a small tree. + #[kani::proof] + #[kani::unwind(20)] + fn verify_rb_invariants_three_nodes() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(10)).unwrap(); + s.insert(&1, Node::new(5)).unwrap(); + s.insert(&2, Node::new(15)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + assert_rb_invariants(tree.root, &s); + assert_bst_order(tree.root, &s); + } + + /// RB invariants hold after inserting 7 nodes in ascending order and after + /// removing all 7 nodes. Ascending insertion maximally stresses the fixup path. + /// Unwind budget: 7 inserts×3 fixup iters = 21; 7 removes×3 = 21; 2 DFS calls×15 = 30; + /// 2 in-order calls×7 = 14; total ≈ 86. Using unwind(100) for headroom. + #[kani::proof] + #[kani::unwind(100)] + fn verify_rb_invariants_seven_ascending() { + let mut s: IndexMap = IndexMap::new(); + for i in 0usize..7 { + s.insert(&i, Node::new(i as i32 + 1)).unwrap(); + } + + let mut tree: RbTree = RbTree::new(); + for i in 0usize..7 { + tree.insert(i, &mut s).unwrap(); + } + + assert_rb_invariants(tree.root, &s); + assert_bst_order(tree.root, &s); + + // Remove all and verify the tree empties cleanly. + for i in 0usize..7 { + tree.remove(i, &mut s).unwrap(); + } + assert!(tree.root.is_none()); + assert!(tree.min().is_none()); + assert_rb_invariants(tree.root, &s); // trivially true for empty tree + } + + /// RB invariants hold after inserting 7 nodes in descending order. 
+ #[kani::proof] + #[kani::unwind(100)] + fn verify_rb_invariants_seven_descending() { + let mut s: IndexMap = IndexMap::new(); + for i in 0usize..7 { + s.insert(&i, Node::new(7 - i as i32)).unwrap(); + } + + let mut tree: RbTree = RbTree::new(); + for i in 0usize..7 { + tree.insert(i, &mut s).unwrap(); + } + + assert_rb_invariants(tree.root, &s); + assert_bst_order(tree.root, &s); + } + + /// RB invariants hold after balanced insertion [4,2,6,1,3,5,7] and after + /// removing the root and minimum. + #[kani::proof] + #[kani::unwind(100)] + fn verify_rb_invariants_balanced_with_removes() { + let keys: [i32; 7] = [4, 2, 6, 1, 3, 5, 7]; + let mut s: IndexMap = IndexMap::new(); + for (i, &k) in keys.iter().enumerate() { + s.insert(&i, Node::new(k)).unwrap(); + } + + let mut tree: RbTree = RbTree::new(); + for i in 0usize..7 { + tree.insert(i, &mut s).unwrap(); + } + + assert_rb_invariants(tree.root, &s); + assert_bst_order(tree.root, &s); + + // Remove root (key=4, idx=0). + tree.remove(0, &mut s).unwrap(); + assert_rb_invariants(tree.root, &s); + assert_bst_order(tree.root, &s); + + // Remove current minimum (key=1, idx=3). + tree.remove(3, &mut s).unwrap(); + assert_rb_invariants(tree.root, &s); + assert_bst_order(tree.root, &s); + } + + /// Verify descending-order insertion [3,2,1] — stress-tests left-leaning fixup. + #[kani::proof] + #[kani::unwind(6)] + fn verify_insert_descending_order() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(3)).unwrap(); + s.insert(&1, Node::new(2)).unwrap(); + s.insert(&2, Node::new(1)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + let min_id = tree.min().unwrap(); + assert_eq!(s.get(min_id).unwrap().key, 1); + } + + /// 7-node ascending insertion [1..7] — maximally stresses right-rotation fixup. + /// min() must equal 1 throughout; after removing all nodes tree is empty. 
+ #[kani::proof] + #[kani::unwind(30)] + fn verify_seven_ascending_insert_remove_all() { + let mut s: IndexMap = IndexMap::new(); + for i in 0usize..7 { + s.insert(&i, Node::new(i as i32 + 1)).unwrap(); // keys 1..=7 + } + + let mut tree: RbTree = RbTree::new(); + for i in 0usize..7 { + tree.insert(i, &mut s).unwrap(); + } + + // Min must be key 1 (stored at index 0). + assert_eq!(tree.min(), Some(0)); + assert_eq!(s.get(0usize).unwrap().key, 1); + + // Remove all nodes in insertion order; min must update correctly. + for i in 0usize..7 { + tree.remove(i, &mut s).unwrap(); + } + assert!(tree.min().is_none()); + } + + /// 7-node descending insertion [7..1] — maximally stresses left-rotation fixup. + #[kani::proof] + #[kani::unwind(30)] + fn verify_seven_descending_insert_remove_all() { + let mut s: IndexMap = IndexMap::new(); + for i in 0usize..7 { + s.insert(&i, Node::new(7 - i as i32)).unwrap(); // keys 7,6,5,4,3,2,1 + } + + let mut tree: RbTree = RbTree::new(); + for i in 0usize..7 { + tree.insert(i, &mut s).unwrap(); + } + + // After inserting [7,6,5,4,3,2,1], min is key 1 (stored at index 6). + assert_eq!(tree.min(), Some(6)); + assert_eq!(s.get(6usize).unwrap().key, 1); + + // Remove min repeatedly and verify it advances each time. + let expected_mins: [i32; 7] = [1, 2, 3, 4, 5, 6, 7]; + for &expected_key in &expected_mins { + let min_id = tree.min().unwrap(); + assert_eq!(s.get(min_id).unwrap().key, expected_key); + tree.remove(min_id, &mut s).unwrap(); + } + assert!(tree.min().is_none()); + } + + /// 7-node BFS-level insertion [4,2,6,1,3,5,7] — produces a perfectly balanced tree. + /// Exercises the case where very few fixup rotations are needed. + /// Then removes nodes in reverse BFS order and checks min after each. 
+ #[kani::proof] + #[kani::unwind(30)] + fn verify_seven_balanced_insert_remove() { + // keys stored at their storage indices + // idx: 0=4, 1=2, 2=6, 3=1, 4=3, 5=5, 6=7 + let keys: [i32; 7] = [4, 2, 6, 1, 3, 5, 7]; + let mut s: IndexMap = IndexMap::new(); + for (i, &k) in keys.iter().enumerate() { + s.insert(&i, Node::new(k)).unwrap(); + } + + let mut tree: RbTree = RbTree::new(); + for i in 0usize..7 { + tree.insert(i, &mut s).unwrap(); + } + + // Min is key 1 at index 3. + assert_eq!(tree.min(), Some(3)); + assert_eq!(s.get(3usize).unwrap().key, 1); + + // Remove the root (key=4, idx=0). + tree.remove(0, &mut s).unwrap(); + // New min is still key 1 (index 3). + assert_eq!(tree.min(), Some(3)); + + // Remove min (key=1, idx=3). New min = key 2 (idx=1). + tree.remove(3, &mut s).unwrap(); + assert_eq!(tree.min(), Some(1)); + assert_eq!(s.get(1usize).unwrap().key, 2); + + // Remove remaining 5 nodes. + for i in [1usize, 2, 4, 5, 6] { + tree.remove(i, &mut s).unwrap(); + } + assert!(tree.min().is_none()); + } +} diff --git a/src/types/traits.rs b/src/types/traits.rs new file mode 100644 index 0000000..aa8c951 --- /dev/null +++ b/src/types/traits.rs @@ -0,0 +1,30 @@ +use core::borrow::Borrow; + +pub trait Get { + type Output: ?Sized; + + fn get>(&self, index: K) -> Option<&Self::Output>; +} + +pub trait GetMut: Get { + fn get_mut>(&mut self, index: K) -> Option<&mut Self::Output>; + + // Getting multiple disjoint mutable references at once + fn get2_mut>(&mut self, index1: K, index2: K) -> (Option<&mut Self::Output>, Option<&mut Self::Output>); + fn get3_mut>(&mut self, index1: K, index2: K, index3: K) -> (Option<&mut Self::Output>, Option<&mut Self::Output>, Option<&mut Self::Output>); +} + +pub trait ToIndex { + fn to_index>(index: Option) -> usize; +} + +impl ToIndex for usize { + fn to_index>(index: Option) -> usize { + index.map_or(0, |i| *i.borrow()) + } +} + +pub trait Project

{ + fn project(&self) -> Option<&P>; + fn project_mut(&mut self) -> Option<&mut P>; +} \ No newline at end of file diff --git a/src/types/view.rs b/src/types/view.rs new file mode 100644 index 0000000..de56096 --- /dev/null +++ b/src/types/view.rs @@ -0,0 +1,68 @@ +use core::borrow::Borrow; +use core::marker::PhantomData; + +use super::traits::{Get, GetMut, Project, ToIndex}; + +pub struct ViewMut<'a, K: ?Sized + ToIndex, P, S: GetMut> +where + S::Output: Project

, +{ + data: &'a mut S, + _k: PhantomData, + _proj: PhantomData

, +} + +impl<'a, K: ?Sized + ToIndex, P, S: GetMut> ViewMut<'a, K, P, S> +where + S::Output: Project

, +{ + pub fn new(data: &'a mut S) -> Self { + Self { + data, + _k: PhantomData, + _proj: PhantomData, + } + } + + pub fn with R, R>(data: &'a mut S, f: F) -> R { + let mut view = Self::new(data); + f(&mut view) + } +} + +impl<'a, K: ?Sized + ToIndex, P, S: GetMut> Get for ViewMut<'a, K, P, S> +where + S::Output: Project

, +{ + type Output = P; + + fn get>(&self, idx: Q) -> Option<&P> { + self.data.get(idx).and_then(Project::project) + } +} + +impl<'a, K: ?Sized + ToIndex, P, S: GetMut> GetMut for ViewMut<'a, K, P, S> +where + S::Output: Project

, +{ + fn get_mut>(&mut self, idx: Q) -> Option<&mut P> { + self.data.get_mut(idx).and_then(Project::project_mut) + } + + fn get2_mut>(&mut self, idx1: Q, idx2: Q) -> (Option<&mut P>, Option<&mut P>) { + let (a, b) = self.data.get2_mut(idx1, idx2); + ( + a.and_then(Project::project_mut), + b.and_then(Project::project_mut), + ) + } + + fn get3_mut>(&mut self, idx1: Q, idx2: Q, idx3: Q) -> (Option<&mut P>, Option<&mut P>, Option<&mut P>) { + let (a, b, c) = self.data.get3_mut(idx1, idx2, idx3); + ( + a.and_then(Project::project_mut), + b.and_then(Project::project_mut), + c.and_then(Project::project_mut), + ) + } +} \ No newline at end of file diff --git a/src/uapi.rs b/src/uapi.rs new file mode 100644 index 0000000..60a2b0e --- /dev/null +++ b/src/uapi.rs @@ -0,0 +1,3 @@ +pub mod print; +pub mod sched; +pub mod time; \ No newline at end of file diff --git a/src/uapi/print.rs b/src/uapi/print.rs new file mode 100644 index 0000000..d148107 --- /dev/null +++ b/src/uapi/print.rs @@ -0,0 +1,24 @@ +use core::fmt::{self, Write}; + +use hal::Machinelike; + +#[macro_export] +macro_rules! 
uprintln { + ($($arg:tt)*) => ({ + use core::fmt::Write; + use osiris::uapi::print::Printer; + + let mut printer = Printer; + printer.write_fmt(format_args!($($arg)*)).unwrap(); + printer.write_str("\n").unwrap(); + }); +} + +pub struct Printer; + +impl Write for Printer { + fn write_str(&mut self, s: &str) -> fmt::Result { + hal::Machine::print(s).map_err(|_| fmt::Error)?; + Ok(()) + } +} \ No newline at end of file diff --git a/src/uapi/sched.rs b/src/uapi/sched.rs new file mode 100644 index 0000000..ef39306 --- /dev/null +++ b/src/uapi/sched.rs @@ -0,0 +1,38 @@ +use hal::stack::EntryFn; + +pub fn sleep(until: u64) -> isize { + hal::asm::syscall!(1, (until >> 32) as u32, until as u32) +} + +pub fn sleep_for(duration: u64) -> isize { + hal::asm::syscall!(2, (duration >> 32) as u32, duration as u32) +} + +pub fn yield_thread() -> isize { + let until = u64::MAX; + hal::asm::syscall!(1, (until >> 32) as u32, until as u32) +} + +#[repr(C)] +#[derive(Clone, Copy)] +pub struct RtAttrs { + pub deadline: u64, + pub period: u32, + pub budget: u32, +} + +pub fn spawn_thread(func_ptr: EntryFn, attrs: Option) -> isize { + let attr_ptr = if let Some(attrs) = attrs { + &attrs as *const RtAttrs as usize + } else { + 0 + }; + hal::asm::syscall!(3, func_ptr as u32, attr_ptr) +} + +pub fn exit(code: usize) -> ! { + hal::asm::syscall!(4, code as u32); + loop { + hal::asm::nop!(); + } +} \ No newline at end of file diff --git a/src/uapi/time.rs b/src/uapi/time.rs new file mode 100644 index 0000000..1b63ebb --- /dev/null +++ b/src/uapi/time.rs @@ -0,0 +1,9 @@ +use crate::time; + +pub fn mono_now() -> u64 { + time::mono_now() +} + +pub fn tick() -> u64 { + time::tick() +} \ No newline at end of file diff --git a/src/uspace.rs b/src/uspace.rs index c0d1d8e..91ae66b 100644 --- a/src/uspace.rs +++ b/src/uspace.rs @@ -1,22 +1,30 @@ //! This module provides access to userspace structures and services. 
-use ::core::mem::transmute; +use crate::{sched, time}; -pub fn init_app(boot_info: &crate::BootInfo) -> Result<(), crate::utils::KernelError> { - let len = boot_info.args.init.len; +unsafe extern "C" { + /// The entry point for the userspace application. + fn app_main() -> (); +} - if len == 0 { - return Err(crate::utils::KernelError::InvalidArgument); - } +extern "C" fn app_main_entry() { + unsafe { app_main() } +} - let entry = unsafe { - transmute::( - boot_info.args.init.begin as usize + boot_info.args.init.entry_offset as usize, - ) +pub fn init_app() { + let attrs = sched::thread::Attributes { + entry: app_main_entry, + fin: None, + attrs: None, }; - // We don't expect coming back from the init program. - // But for future user mode support the init program will be run by the scheduler, thus we leave a result as a return value here. - entry(); - Ok(()) + sched::with(|sched| { + if let Ok(uid) = sched.create_thread(Some(sched::task::KERNEL_TASK), &attrs) { + if sched.enqueue(time::tick(), uid).is_err() { + panic!("failed to enqueue init thread."); + } + } else { + panic!("failed to create init task."); + } + }) } diff --git a/src/utils.rs b/src/utils.rs deleted file mode 100644 index 8d4144f..0000000 --- a/src/utils.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Utility functions and definitions for the kernel. -#![cfg_attr(feature = "nightly", feature(likely_unlikely))] - -use core::fmt::Debug; - -/// These two definitions are copied from https://github.com/rust-lang/hashbrown -#[cfg(not(feature = "nightly"))] -#[allow(unused_imports)] -pub(crate) use core::convert::{identity as likely, identity as unlikely}; - -#[cfg(feature = "nightly")] -pub(crate) use core::hint::{likely, unlikely}; - -/// This is a macro that is used to panic when a bug is detected. -/// It is similar to the BUG() macro in the Linux kernel. Link: [https://www.kernel.org/]() -#[macro_export] -macro_rules! 
BUG { - () => { - panic!("BUG triggered at {}:{}", file!(), line!()); - }; - ($msg:expr) => { - panic!("BUG triggered: {} at {}:{}", $msg, file!(), line!()); - }; -} - -/// This is a macro that is used to panic when a condition is true. -/// It is similar to the BUG_ON() macro in the Linux kernel. Link: [https://www.kernel.org/]() -#[macro_export] -macro_rules! BUG_ON { - ($cond:expr) => {{ - let cond = $cond; - #[allow(unused_unsafe)] - if unsafe { $crate::utils::unlikely(cond) } { - BUG!(); - } - }}; - ($cond:expr, $msg:expr) => {{ - let cond = $cond; - #[allow(unused_unsafe)] - if unsafe { $crate::utils::unlikely(cond) } { - BUG!($msg); - } - }}; -} - -/// The error type that is returned when an error in the kernel occurs. -#[derive(PartialEq, Eq, Clone)] -pub enum KernelError { - /// The alignment is invalid. - InvalidAlign, - /// The kernel is out of memory. - OutOfMemory, - InvalidSize, - InvalidAddress, - InvalidArgument, - HalError(hal::Error), -} - -/// Debug msg implementation for KernelError. 
-impl Debug for KernelError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - KernelError::InvalidAlign => write!(f, "Invalid alignment"), - KernelError::OutOfMemory => write!(f, "Out of memory"), - KernelError::InvalidSize => write!(f, "Invalid size"), - KernelError::InvalidAddress => write!(f, "Invalid address"), - KernelError::InvalidArgument => write!(f, "Invalid argument"), - KernelError::HalError(e) => write!(f, "{e} (in HAL)"), - } - } -} - -impl From for KernelError { - fn from(err: hal::Error) -> Self { - KernelError::HalError(err) - } -} diff --git a/xtasks/Cargo.toml b/xtasks/Cargo.toml index bb63b52..96e7895 100644 --- a/xtasks/Cargo.toml +++ b/xtasks/Cargo.toml @@ -3,7 +3,7 @@ name = "xtask" version = "0.1.0" edition = "2024" -[dependencies] +[target.'cfg(not(target_os = "none"))'.dependencies] logging = { path = "logging" } clap = "4.5.53" walkdir = "2.5.0" diff --git a/xtasks/crates/config/Cargo.toml b/xtasks/crates/config/Cargo.toml index 5428bb0..78dc4c2 100644 --- a/xtasks/crates/config/Cargo.toml +++ b/xtasks/crates/config/Cargo.toml @@ -3,7 +3,7 @@ name = "config" version = "0.1.0" edition = "2024" -[dependencies] +[target.'cfg(not(target_os = "none"))'.dependencies] logging = { workspace = true } ratatui = "0.29.0" crossterm = "0.27" diff --git a/xtasks/crates/config/src/file.rs b/xtasks/crates/config/src/file.rs index 0b97663..abc5270 100644 --- a/xtasks/crates/config/src/file.rs +++ b/xtasks/crates/config/src/file.rs @@ -20,6 +20,15 @@ pub fn load_file(path: &Path) -> Result { }) } +pub fn create_if_not_exists(path: &Path) -> Result<()> { + if !path.exists() { + std::fs::write(path, "") + .with_context(|| format!("failed to create file {}", path.display()))?; + } + + Ok(()) +} + pub fn load_files(root: &Path, filename: &str) -> Vec> { let mut files = Vec::new(); diff --git a/xtasks/crates/config/src/lib.rs b/xtasks/crates/config/src/lib.rs index 60172ee..a5f0704 100644 --- 
a/xtasks/crates/config/src/lib.rs +++ b/xtasks/crates/config/src/lib.rs @@ -1,3 +1,6 @@ +#![cfg_attr(target_os = "none", no_std)] +#![cfg(not(target_os = "none"))] + use std::{path::Path, process::exit}; use crate::{ @@ -21,8 +24,7 @@ mod toml_patch; pub mod types; pub mod ui; -use anyhow::anyhow; -use toml_edit::{DocumentMut, ImDocument, Item, Table}; +use toml_edit::{DocumentMut, ImDocument}; pub fn load_config(root: &Path, filename: &str) -> ConfigNode { let files = file::load_files(root, filename); @@ -116,6 +118,7 @@ pub fn load_state<'node>( } pub fn load_toml_mut(toml: &Path) -> Result { + file::create_if_not_exists(&toml)?; let File { path, content } = file::load_file(&toml)?; let path = path.to_string_lossy(); @@ -151,41 +154,11 @@ pub fn load_toml(toml: &Path) -> Result, Error> { Ok(doc) } -#[rustversion::since(1.94)] -compile_error!("config-includes are stable since Rust 1.94; fix the TODOs below."); - pub fn apply_preset(config: &mut DocumentMut, preset: &ImDocument) -> Result<(), Error> { - for (key, value) in preset.iter() { - // We override with a depth of zero or one granularity. - - // TODO: Until we have config-includes stabilized, we skip alias sections. 
- if key == "alias" { - continue; - } - - match value { - Item::Table(src) => { - let dst = config.entry(key).or_insert(Item::Table(Table::new())); - - if let Item::Table(dst) = dst { - dst.clear(); + config.clear(); - for (key, value) in src.iter() { - dst.insert(key, value.clone()); - } - } else { - return Err(anyhow!( - "type mismatch when applying preset key '{}': expected table, found {}", - key, - dst.type_name() - ) - .into()); - } - } - _ => { - config.insert(key, value.clone()); - } - } + for (key, value) in preset.iter() { + config.insert(key, value.clone()); } Ok(()) diff --git a/xtasks/crates/config/src/main.rs b/xtasks/crates/config/src/main.rs index 85320c1..00f1244 100644 --- a/xtasks/crates/config/src/main.rs +++ b/xtasks/crates/config/src/main.rs @@ -1,3 +1,7 @@ +#![cfg_attr(target_os = "none", no_std)] +#![cfg_attr(target_os = "none", no_main)] +#![cfg(not(target_os = "none"))] + use std::path::{Path, PathBuf}; use config::error::Error; @@ -58,7 +62,9 @@ pub fn main() { } fn ask_confirmation(prompt: &str) -> bool { - print!("{} (y/N): ", prompt); + print!("{}\n\n(y/N): ", + prompt + ); if let Err(_) = std::io::Write::flush(&mut std::io::stdout()) { return false; @@ -79,14 +85,13 @@ fn run_load_preset(preset_name: &str, no_confirm: bool, current_dir: &Path) -> R let preset_path = PathBuf::from("presets").join(format!("{preset_name}.toml")); let preset = config::load_toml(&preset_path)?; - let config_path = current_dir.join(".cargo/config.toml"); + let config_path = current_dir.join("config.toml"); let mut config = config::load_toml_mut(&config_path)?; // Ask for confirmation if !no_confirm - && !ask_confirmation(&format!( - "Are you sure you want to apply the preset '{preset_name}' to {}?\nThis will overwrite all existing configuration options.", + && !ask_confirmation(&format!("\nApply preset '{preset_name}' to '{}'?\nThis overwrites all existing configuration options.", config_path.display() )) { @@ -111,14 +116,14 @@ fn run_clean(no_confirm: 
bool, current_dir: &Path) -> Result<(), Error> { // Ask for confirmation if !no_confirm && !ask_confirmation( - "Are you sure you want to remove all configuration options from .cargo/config.toml?", + "Are you sure you want to remove all configuration options from config.toml?", ) { log::info!("Abort."); return Ok(()); } - let config_path = current_dir.join(".cargo/config.toml"); + let config_path = current_dir.join("config.toml"); let mut config = config::load_toml_mut(&config_path)?; @@ -142,7 +147,7 @@ fn run_clean(no_confirm: bool, current_dir: &Path) -> Result<(), Error> { } fn run_ui(current_dir: &Path) { - let config_path = current_dir.join(".cargo/config.toml"); + let config_path = current_dir.join("config.toml"); let node = config::load_config(¤t_dir, "options.toml"); diff --git a/xtasks/crates/dtgen/Cargo.toml b/xtasks/crates/dtgen/Cargo.toml index 2cedf04..432387d 100644 --- a/xtasks/crates/dtgen/Cargo.toml +++ b/xtasks/crates/dtgen/Cargo.toml @@ -11,10 +11,13 @@ path = "src/lib.rs" name = "dtgen" path = "src/main.rs" -[dependencies] +[target.'cfg(not(target_os = "none"))'.dependencies] fdt = "0.1.5" +logging = { workspace = true } +log = "0.4.27" clap = { version = "4", features = ["derive"] } quote = "1" proc-macro2 = "1" prettyplease = "0.2" syn = { version = "2", features = ["full"] } +indoc = "2.0.7" \ No newline at end of file diff --git a/xtasks/crates/dtgen/src/ldgen.rs b/xtasks/crates/dtgen/src/ldgen.rs new file mode 100644 index 0000000..e0b5041 --- /dev/null +++ b/xtasks/crates/dtgen/src/ldgen.rs @@ -0,0 +1,84 @@ +use crate::ir::DeviceTree; + +fn format_region(name: &str, base: u64, size: u64) -> String { + format!( + " {} : ORIGIN = 0x{:08x}, LENGTH = 0x{:08x}", + name, base, size + ) +} + +fn format_memory_section(regions: &[(&str, u64, u64)]) -> String { + let regions = regions + .iter() + .map(|&(name, base, size)| format_region(name, base, size)) + .collect::>() + .join("\n"); + + indoc::formatdoc! {" + /* This file is @generated by dtgen. 
Do not edit. */ + MEMORY {{ + {regions} + }} + ", regions = regions} +} + +fn format_irq_provides(num: u32) -> String { + format!("PROVIDE(__irq_{}_handler = default_handler);", num) +} + +fn coalesce_regions<'a>(name: &'a str, regions: Vec<(&'a str, u64, u64)>) -> Result, String> { + regions + .clone() + .into_iter() + .try_fold(None, |acc, (_, base, size)| { + if let Some((_, acc_base, acc_size)) = acc { + if base > acc_base + acc_size || acc_base > base + size { + return Err(format!("Regions are not contiguous. {regions:?}")); + } + + let end = (base + size).max(acc_base + acc_size); + let base = base.min(acc_base); + let size = end - base; + Ok(Some((name, base, size))) + } else { + Ok(Some((name, base, size))) + } + }) +} + +pub fn generate_ld(dt: &DeviceTree) -> Result { + // Generates a linker script prelude that defines the memory regions for the device tree. + + let mut ram: Vec<(&str, u64, u64)> = dt + .nodes + .iter() + .filter(|n| n.name.starts_with("memory@") || n.name == "memory") + .filter_map(|n| { + let (base, size) = n.reg?; + Some((n.name.as_str(), base, size)) + }) + .collect(); + ram.sort_by_key(|&(_, base, _)| base); + + let mut flash: Vec<(&str, u64, u64)> = dt + .nodes + .iter() + .filter(|n| n.name.starts_with("flash@") || n.name == "flash") + .filter_map(|n| { + let (base, size) = n.reg?; + Some((n.name.as_str(), base, size)) + }) + .collect(); + flash.sort_by_key(|&(_, base, _)| base); + + let flash = coalesce_regions("FLASH", flash)?; + let ram = coalesce_regions("RAM", ram)?; + + let regions = flash.into_iter().chain(ram).collect::>(); + + if regions.is_empty() { + return Err("No memory regions found in device tree".to_string()); + } + + Ok(format_memory_section(®ions)) +} diff --git a/xtasks/crates/dtgen/src/lib.rs b/xtasks/crates/dtgen/src/lib.rs index 36d6917..aa3d761 100644 --- a/xtasks/crates/dtgen/src/lib.rs +++ b/xtasks/crates/dtgen/src/lib.rs @@ -1,13 +1,24 @@ +#![cfg_attr(target_os = "none", no_std)] +#![cfg(not(target_os = 
"none"))] + mod codegen; -mod ir; +pub mod ir; mod parser; +mod ldgen; use std::path::Path; -pub fn run(dts_path: &Path, include_dirs: &[&Path], out_path: &Path) -> Result<(), String> { +use crate::ir::DeviceTree; + +pub fn parse_dts(dts_path: &Path, include_dirs: &[&Path]) -> Result { let dtb = parser::dts_to_dtb(dts_path, include_dirs)?; - let dt = parser::dtb_to_devicetree(&dtb)?; - let src = codegen::generate_rust(&dt); - std::fs::write(out_path, src) - .map_err(|e| format!("dtgen: failed to write {}: {e}", out_path.display())) + parser::dtb_to_devicetree(&dtb) +} + +pub fn generate_rust(dt: &DeviceTree) -> String { + codegen::generate_rust(dt) +} + +pub fn generate_ld(dt: &DeviceTree) -> Result { + ldgen::generate_ld(dt) } diff --git a/xtasks/crates/dtgen/src/main.rs b/xtasks/crates/dtgen/src/main.rs index 370bf88..e2a917e 100644 --- a/xtasks/crates/dtgen/src/main.rs +++ b/xtasks/crates/dtgen/src/main.rs @@ -1,3 +1,7 @@ +#![cfg_attr(target_os = "none", no_std)] +#![cfg_attr(target_os = "none", no_main)] +#![cfg(not(target_os = "none"))] + use clap::Parser; use std::path::PathBuf; @@ -21,11 +25,25 @@ struct Args { } fn main() { + logging::init(); let args = Args::parse(); let refs: Vec<&std::path::Path> = args.include_dirs.iter().map(|p| p.as_path()).collect(); - dtgen::run(&args.input, &refs, &args.output).unwrap_or_else(|e| { - eprintln!("dtgen error: {e}"); + let dt = dtgen::parse_dts(&args.input, &refs).unwrap_or_else(|e| { + log::error!("dtgen error: Failed to parse device tree: {e}"); + std::process::exit(1); + }); + + let output = args.output.as_path(); + std::fs::create_dir_all(output.parent().unwrap()).unwrap_or_else(|e| { + log::error!("dtgen error: Failed to create output directory: {e}"); + std::process::exit(1); + }); + + let content = dtgen::generate_rust(&dt); + + std::fs::write(&args.output, content).unwrap_or_else(|e| { + log::error!("dtgen error: Failed to write output file: {e}"); std::process::exit(1); }); } diff --git 
a/xtasks/crates/injector/Cargo.toml b/xtasks/crates/injector/Cargo.toml index 9364c7e..0faa7cf 100644 --- a/xtasks/crates/injector/Cargo.toml +++ b/xtasks/crates/injector/Cargo.toml @@ -3,7 +3,7 @@ name = "injector" version = "0.1.0" edition = "2024" -[dependencies] +[target.'cfg(not(target_os = "none"))'.dependencies] logging = { workspace = true } clap = { version = "4.5", features = ["derive"] } object = "0.36" diff --git a/xtasks/crates/injector/src/main.rs b/xtasks/crates/injector/src/main.rs index 973b8c7..3171e88 100644 --- a/xtasks/crates/injector/src/main.rs +++ b/xtasks/crates/injector/src/main.rs @@ -1,3 +1,7 @@ +#![cfg_attr(target_os = "none", no_std)] +#![cfg_attr(target_os = "none", no_main)] +#![cfg(not(target_os = "none"))] + use cargo_metadata::MetadataCommand; use clap::Parser; use object::{Object, ObjectSection}; @@ -104,7 +108,7 @@ fn inject(elf: &PathBuf) -> Result<(), String> { } fn get_target_from_cargo_config(manifest_dir: &PathBuf) -> Option { - let cargo_config = manifest_dir.join(".cargo").join("config.toml"); + let cargo_config = manifest_dir.join("config.toml"); if !cargo_config.exists() { return None; diff --git a/xtasks/crates/pack/Cargo.toml b/xtasks/crates/pack/Cargo.toml index 7ca2656..a9d72eb 100644 --- a/xtasks/crates/pack/Cargo.toml +++ b/xtasks/crates/pack/Cargo.toml @@ -3,14 +3,13 @@ name = "pack" version = "0.1.0" edition = "2024" -[dependencies] +[target.'cfg(not(target_os = "none"))'.dependencies] anyhow = "1.0.100" logging = { workspace = true } clap = { version = "4.5.47", features = ["derive"] } crc-fast = "1.8.0" elf = "0.8.0" log = "0.4.27" -interface = { workspace = true } bytemuck = { version = "1.24.0", features = ["derive"] } tempfile = "3.23.0" cargo_metadata = "0.23.1" diff --git a/xtasks/crates/pack/src/bootinfo.rs b/xtasks/crates/pack/src/bootinfo.rs deleted file mode 100644 index 8f9fcf3..0000000 --- a/xtasks/crates/pack/src/bootinfo.rs +++ /dev/null @@ -1,67 +0,0 @@ -use crate::image; - -pub struct BootInfo { 
- inner: Vec, -} - -impl BootInfo { - pub fn new(img_paddr: usize, section: &image::Section) -> Self { - let boot_info = interface::BootInfo { - magic: interface::BOOT_INFO_MAGIC, - version: 1, - mmap: [interface::MemMapEntry { - size: 0, - addr: 0, - length: 0, - ty: 0, - }; 8], - mmap_len: 0, - args: interface::Args { - init: interface::InitDescriptor { - begin: (img_paddr + section.offset()) as u64, - len: section.size() as u64, - entry_offset: section.entry_offset() as u64, - }, - }, - }; - - let boot_info_bytes = bytemuck::bytes_of(&boot_info); - - Self { - inner: boot_info_bytes.to_vec(), - } - } - - pub fn inner(&self) -> &Vec { - &self.inner - } -} - -// Tests for bootinfo -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_bootinfo_fields() { - let boot_info = BootInfo::new( - 0x4000, - &image::Section::from_parts(0x4000, 0x2000, 0x100, 0x1000, 0, false), - ); - - // Deserialize back to struct for comparison - assert_eq!( - boot_info.inner().len(), - std::mem::size_of::() - ); - - let reconstructed: interface::BootInfo = - unsafe { std::ptr::read(boot_info.inner().as_ptr() as *const interface::BootInfo) }; - - assert_eq!(reconstructed.magic, interface::BOOT_INFO_MAGIC); - assert_eq!(reconstructed.version, 1); - assert_eq!(reconstructed.args.init.begin, 0x4000 + 0x4000); - assert_eq!(reconstructed.args.init.len, 0x2000); - assert_eq!(reconstructed.args.init.entry_offset, 0x100); - } -} diff --git a/xtasks/crates/pack/src/main.rs b/xtasks/crates/pack/src/main.rs index 5e985a8..6eba097 100644 --- a/xtasks/crates/pack/src/main.rs +++ b/xtasks/crates/pack/src/main.rs @@ -1,8 +1,11 @@ +#![cfg_attr(target_os = "none", no_std)] +#![cfg_attr(target_os = "none", no_main)] +#![cfg(not(target_os = "none"))] + use std::path::PathBuf; use clap::Parser; -mod bootinfo; mod elf; mod image; mod pack; diff --git a/xtasks/crates/pack/src/pack.rs b/xtasks/crates/pack/src/pack.rs index e4cf12f..6cab370 100644 --- a/xtasks/crates/pack/src/pack.rs +++ 
b/xtasks/crates/pack/src/pack.rs @@ -4,7 +4,6 @@ use anyhow::{Result, anyhow, bail}; use cargo_metadata::MetadataCommand; use crate::{ - bootinfo, elf::ElfInfo, image::{self}, }; @@ -126,8 +125,7 @@ pub fn pack(init_info: &ElfInfo, kernel_info: &mut ElfInfo, out: &Path) -> Resul let init_section = img.add_elf(init_info, image::SectionDescripter::Loadable(None))?; // Patch bootinfo into kernel. - let boot_info = bootinfo::BootInfo::new(img.paddr(), &init_section); - kernel_info.patch_section(".bootinfo", 0, boot_info.inner())?; + //kernel_info.patch_section(".bootinfo", 0, boot_info.inner())?; // Update kernel in image. img.update(kernel_info, 0)?; diff --git a/xtasks/logging/Cargo.toml b/xtasks/logging/Cargo.toml index be24d26..fb270e1 100644 --- a/xtasks/logging/Cargo.toml +++ b/xtasks/logging/Cargo.toml @@ -3,7 +3,7 @@ name = "logging" version = "0.1.0" edition = "2024" -[dependencies] +[target.'cfg(not(target_os = "none"))'.dependencies] env_logger = "0.11.8" log = "0.4.29" once_cell = "1.21.3" diff --git a/xtasks/logging/src/lib.rs b/xtasks/logging/src/lib.rs index 9b64f20..0bec258 100644 --- a/xtasks/logging/src/lib.rs +++ b/xtasks/logging/src/lib.rs @@ -1,3 +1,7 @@ +#![cfg_attr(target_os = "none", no_std)] +#![cfg_attr(target_os = "none", no_main)] +#![cfg(not(target_os = "none"))] + use std::io::Write; use std::sync::OnceLock; diff --git a/xtasks/src/main.rs b/xtasks/src/main.rs index ad7cb3a..a2a4820 100644 --- a/xtasks/src/main.rs +++ b/xtasks/src/main.rs @@ -1,3 +1,7 @@ +#![cfg_attr(target_os = "none", no_std)] +#![cfg_attr(target_os = "none", no_main)] +#![cfg(not(target_os = "none"))] + use std::path::{Path, PathBuf}; use cargo_metadata::{MetadataCommand, TargetKind};