diff --git a/CMakeLists.txt b/CMakeLists.txt
index 13a8bc43..21c88e57 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,6 +4,9 @@
 
 set(RUST_MODULE_DIR "${CMAKE_CURRENT_LIST_DIR}" CACHE INTERNAL "")
 
+# Initially, we just have a single DT augment file.
+set(DT_AUGMENTS "${CMAKE_CURRENT_LIST_DIR}/dt-rust.yaml" CACHE INTERNAL "")
+
 # Zephyr targets are defined through Kconfig. We need to map these to
 # an appropriate llvm target triple. This sets `RUST_TARGET` in the
 # parent scope, or an error if the target is not yet supported by
@@ -54,7 +57,7 @@ endfunction()
 function(get_include_dirs target dirs)
   get_target_property(include_dirs ${target} INTERFACE_INCLUDE_DIRECTORIES)
   if(include_dirs)
-    set(${dirs} ${include_dirs} PARENT_SCOPE)
+    set(${dirs} "${include_dirs}" PARENT_SCOPE)
   else()
     set(${dirs} "" PARENT_SCOPE)
   endif()
@@ -138,6 +141,8 @@ ZEPHYR_DTS = \"${ZEPHYR_DTS}\"
 INCLUDE_DIRS = \"${include_dirs}\"
 INCLUDE_DEFINES = \"${include_defines}\"
 WRAPPER_FILE = \"${WRAPPER_FILE}\"
+BINARY_DIR_INCLUDE_GENERATED = \"${BINARY_DIR_INCLUDE_GENERATED}\"
+DT_AUGMENTS = \"${DT_AUGMENTS}\"
 
 [patch.crates-io]
 ${config_paths}
@@ -161,6 +166,8 @@ ${config_paths}
       INCLUDE_DIRS="${include_dirs}"
       INCLUDE_DEFINES="${include_defines}"
       WRAPPER_FILE="${WRAPPER_FILE}"
+      DT_AUGMENTS="${DT_AUGMENTS}"
+      BINARY_DIR_INCLUDE_GENERATED="${BINARY_DIR_INCLUDE_GENERATED}"
     cargo build
     ${rust_build_type_arg}
@@ -201,6 +208,8 @@ ${config_paths}
      INCLUDE_DIRS="${include_dirs}"
      INCLUDE_DEFINES="${include_defines}"
      WRAPPER_FILE="${WRAPPER_FILE}"
+     DT_AUGMENTS="${DT_AUGMENTS}"
+     BINARY_DIR_INCLUDE_GENERATED="${BINARY_DIR_INCLUDE_GENERATED}"
     cargo doc
     ${rust_build_type_arg}
diff --git a/dt-rust.yaml b/dt-rust.yaml
new file mode 100644
index 00000000..2837cf3f
--- /dev/null
+++ b/dt-rust.yaml
@@ -0,0 +1,173 @@
+# Description of how to augment the devicetree for Rust.
+#
+# Each entry describes an augmentation that will be added to matching nodes in the device tree.
+# The full syntax is described (indirectly) in `zephyr-build/src/devicetree/config.rs`.
+
+# Gpio controllers match for every node that has a `gpio-controller` property. This is one of the
+# few instances where we can actually just match on a property.
+- name: gpio-controller
+  rules:
+    - type: has_prop
+      value: gpio-controller
+  actions:
+    - type: instance
+      value:
+        raw:
+          type: myself
+          value:
+            args: []
+        device: crate::device::gpio::Gpio
+
+# The gpio-leds node will have child nodes describing each LED. We'll match on the parent
+# having this compatible property. The nodes themselves are built out of the properties associated
+# with each gpio.
+- name: gpio-leds
+  rules:
+    - type: compatible
+      value:
+        names:
+          - gpio-leds
+        level: 1
+  actions:
+    - type: instance
+      value:
+        raw:
+          type: phandle
+          value: gpios
+        device: crate::device::gpio::GpioPin
+
+# Flash controllers don't have any particular property to identify them, so we need a list of
+# compatible values that should match.
+- name: flash-controller
+  rules:
+    - type: compatible
+      value:
+        names:
+          - "nordic,nrf52-flash-controller"
+          - "nordic,nrf51-flash-controller"
+          - "raspberrypi,pico-flash-controller"
+        level: 0
+  actions:
+    - type: instance
+      value:
+        raw:
+          type: myself
+          value:
+            args: []
+        device: crate::device::flash::FlashController
+
+# Flash partitions exist as children of a node compatible with "soc-nv-flash" that is itself a
+# child of the controller.
+# TODO: Get the write and erase property from the DT if present.
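+#
+# As an illustration, these rules would match `partition@c000` in a hypothetical
+# (nRF-style) layout such as:
+#
+#   flash-controller@4001e000 {
+#       flash@0 {
+#           compatible = "soc-nv-flash";
+#           partitions {
+#               compatible = "fixed-partitions";
+#               partition@c000 {
+#                   reg = <0xc000 0x76000>;
+#               };
+#           };
+#       };
+#   };
+#
+# Here "fixed-partitions" is the parent (level 1), "soc-nv-flash" the grandparent
+# (level 2), and the instance is constructed from the controller (level 3) plus the
+# partition's own `reg` values.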
+- name: flash-partition
+  rules:
+    - type: compatible
+      value:
+        names:
+          - "fixed-partitions"
+        level: 1
+    - type: compatible
+      value:
+        names:
+          - "soc-nv-flash"
+        level: 2
+  actions:
+    - type: instance
+      value:
+        raw:
+          type: parent
+          value:
+            level: 3
+            args:
+              - type: reg
+        device: "crate::device::flash::FlashPartition"
+
+# Uart devices. This just has to be a list of devices that implement this interface.
+- name: uart
+  rules:
+    - type: compatible
+      value:
+        names:
+          - "arm,pl011"
+          # The nordic driver needs to be separate because they have a separate Kconfig for each uart
+          # block.
+          # - "nordic,nrf-uarte"
+          - "zephyr,cdc-acm-uart"
+        level: 0
+  actions:
+    - type: instance
+      value:
+        raw:
+          type: myself
+          value:
+            args: []
+        device: "crate::device::uart::Uart"
+        kconfig: CONFIG_SERIAL
+
+- name: led-strip
+  rules:
+    - type: or
+      value:
+        - type: compatible
+          value:
+            names:
+              - "worldsemi,ws2812-spi"
+            level: 0
+        - type: compatible
+          value:
+            names:
+              - "worldsemi,ws2812-rpi_pico-pio"
+            level: 1
+  actions:
+    - type: instance
+      value:
+        raw:
+          type: myself
+          value:
+            args: []
+        device: "crate::device::led_strip::LedStrip"
+        kconfig: CONFIG_LED_STRIP
+
+- name: pwm-leds
+  rules:
+    - type: compatible
+      value:
+        names:
+          - "pwm-leds"
+        level: 0
+  actions:
+    - type: instance
+      value:
+        raw:
+          type: myself
+          value:
+            args:
+              - type: child_count
+        device: "crate::device::led::Leds"
+
+# This doesn't really belong here, and can be moved once we support modules having their own augment
+# files.
+- name: bbq-kbd-matrix
+  rules:
+    - type: compatible
+      value:
+        names:
+          - "bbq-kbd-matrix"
+        level: 0
+  actions:
+    - type: gpio_pins
+      value:
+        property: "row-gpios"
+        getter: "get_rows"
+    - type: gpio_pins
+      value:
+        property: "col-gpios"
+        getter: "get_cols"
+
+# Generate a pseudo node that matches all of the labels across the tree with their nodes.
+- name: labels
+  rules:
+    - type: root
+  actions:
+    - type: labels
diff --git a/etc/platforms.txt b/etc/platforms.txt
index e459795d..42b27bc6 100644
--- a/etc/platforms.txt
+++ b/etc/platforms.txt
@@ -1,5 +1,3 @@
--p mps2/an385
--p mps2/an521/cpu0
 -p qemu_cortex_m0
 -p qemu_cortex_m3
 -p qemu_riscv32
diff --git a/samples/blinky/CMakeLists.txt b/samples/blinky/CMakeLists.txt
new file mode 100644
index 00000000..9efa442c
--- /dev/null
+++ b/samples/blinky/CMakeLists.txt
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: Apache-2.0 OR MIT
+
+cmake_minimum_required(VERSION 3.20.0)
+find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
+project(blinky)
+
+rust_cargo_application()
diff --git a/samples/blinky/Cargo.toml b/samples/blinky/Cargo.toml
new file mode 100644
index 00000000..9bf5f2ce
--- /dev/null
+++ b/samples/blinky/Cargo.toml
@@ -0,0 +1,20 @@
+# Copyright (c) 2024 Linaro LTD
+# SPDX-License-Identifier: Apache-2.0
+
+[package]
+# This must be rustapp for now.
+name = "rustapp"
+version = "0.1.0"
+edition = "2021"
+description = "Blink an LED forever using the GPIO API"
+license = "Apache-2.0 OR MIT"
+
+[lib]
+crate-type = ["staticlib"]
+
+[dependencies]
+zephyr = "0.1.0"
+log = "0.4.22"
+
+[build-dependencies]
+zephyr-build = "0.1.0"
diff --git a/samples/blinky/README.rst b/samples/blinky/README.rst
new file mode 100644
index 00000000..ec23fe54
--- /dev/null
+++ b/samples/blinky/README.rst
@@ -0,0 +1,97 @@
+.. zephyr:code-sample:: blinky
+   :name: Blinky
+   :relevant-api: gpio_interface
+
+   Blink an LED forever using the GPIO API.
+
+Overview
+********
+
+The Blinky sample blinks an LED forever using the :ref:`GPIO API <gpio_api>`.
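+
+In this Rust port, the core of the sample (an excerpt from :file:`src/lib.rs` in
+this directory) configures the pin as an output and then toggles it in a loop:
+
+.. code-block:: rust
+
+   unsafe { led0.configure(&mut gpio_token, GPIO_OUTPUT_ACTIVE); }
+   let duration = Duration::millis_at_least(500);
+   loop {
+       unsafe { led0.toggle_pin(&mut gpio_token); }
+       sleep(duration);
+   }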
+
+The source code shows how to:
+
+#. Get a pin specification from the :ref:`devicetree <dt-guide>` as a
+   :c:struct:`gpio_dt_spec`
+#. Configure the GPIO pin as an output
+#. Toggle the pin forever
+
+See :zephyr:code-sample:`pwm-blinky` for a similar sample that uses the PWM API instead.
+
+.. _blinky-sample-requirements:
+
+Requirements
+************
+
+Your board must:
+
+#. Have an LED connected via a GPIO pin (these are called "User LEDs" on many of
+   Zephyr's :ref:`boards`).
+#. Have the LED configured using the ``led0`` devicetree alias.
+
+Building and Running
+********************
+
+Build and flash Blinky as follows, changing ``reel_board`` for your board:
+
+.. zephyr-app-commands::
+   :zephyr-app: samples/blinky
+   :board: reel_board
+   :goals: build flash
+   :compact:
+
+After flashing, the LED starts to blink and messages with the current LED state
+are printed on the console. If a runtime error occurs, the sample exits without
+printing to the console.
+
+Build errors
+************
+
+You will see a build error at the source code line defining the ``struct
+gpio_dt_spec led`` variable if you try to build Blinky for an unsupported
+board.
+
+On GCC-based toolchains, the error looks like this:
+
+.. code-block:: none
+
+   error: '__device_dts_ord_DT_N_ALIAS_led_P_gpios_IDX_0_PH_ORD' undeclared here (not in a function)
+
+Adding board support
+********************
+
+To add support for your board, add something like this to your devicetree:
+
+.. code-block:: DTS
+
+   / {
+       aliases {
+           led0 = &myled0;
+       };
+
+       leds {
+           compatible = "gpio-leds";
+           myled0: led_0 {
+               gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>;
+           };
+       };
+   };
+
+The above sets your board's ``led0`` alias to use pin 13 on GPIO controller
+``gpio0``. The pin flags :c:macro:`GPIO_ACTIVE_HIGH` mean the LED is on when
+the pin is set to its high state, and off when the pin is in its low state.
+
+Tips:
+
+- See :dtcompatible:`gpio-leds` for more information on defining GPIO-based LEDs
+  in devicetree.
+
+- If you're not sure what to do, check the devicetrees for supported boards which
+  use the same SoC as your target. See :ref:`get-devicetree-outputs` for details.
+
+- See :zephyr_file:`include/zephyr/dt-bindings/gpio/gpio.h` for the flags you can use
+  in devicetree.
+
+- If the LED is built in to your board hardware, the alias should be defined in
+  your :ref:`BOARD.dts file <devicetree-in-out-files>`. Otherwise, you can
+  define one in a :ref:`devicetree overlay <set-devicetree-overlays>`.
diff --git a/samples/blinky/build.rs b/samples/blinky/build.rs
new file mode 100644
index 00000000..f3849d1e
--- /dev/null
+++ b/samples/blinky/build.rs
@@ -0,0 +1,9 @@
+fn main() {
+    // This call will make config entries available in the code for every device tree node, to
+    // allow conditional compilation based on whether it is present in the device tree.
+    // For example, it will be possible to have:
+    // ```rust
+    // #[cfg(dt = "aliases::led0")]
+    // ```
+    zephyr_build::dt_cfgs();
+}
diff --git a/samples/blinky/prj.conf b/samples/blinky/prj.conf
new file mode 100644
index 00000000..1ff6fb75
--- /dev/null
+++ b/samples/blinky/prj.conf
@@ -0,0 +1,10 @@
+CONFIG_GPIO=y
+
+CONFIG_RUST=y
+CONFIG_RUST_ALLOC=y
+
+CONFIG_DEBUG=y
+CONFIG_MAIN_STACK_SIZE=8192
+
+# Verify that userspace builds work.
+# CONFIG_USERSPACE=y
diff --git a/samples/blinky/sample.yaml b/samples/blinky/sample.yaml
new file mode 100644
index 00000000..2b37187d
--- /dev/null
+++ b/samples/blinky/sample.yaml
@@ -0,0 +1,13 @@
+# See doc/develop/test/twister.rst for what is here.
+sample:
+  name: Blinky Sample
+tests:
+  sample.basic.blinky:
+    tags:
+      - LED
+      - gpio
+    filter: dt_enabled_alias_with_parent_compat("led0", "gpio-leds")
+    depends_on: gpio
+    harness: led
+    integration_platforms:
+      - frdm_k64f
diff --git a/samples/blinky/src/lib.rs b/samples/blinky/src/lib.rs
new file mode 100644
index 00000000..5a22f03a
--- /dev/null
+++ b/samples/blinky/src/lib.rs
@@ -0,0 +1,52 @@
+// Copyright (c) 2024 Linaro LTD
+// SPDX-License-Identifier: Apache-2.0
+
+#![no_std]
+
+// Sigh. The check config system requires that the compiler be told what possible config values
+// there might be. This is completely impossible with both Kconfig and the DT configs, since the
+// whole point is that we likely need to check for configs that aren't otherwise present in the
+// build. So, this is just always necessary.
+#![allow(unexpected_cfgs)]
+
+use log::warn;
+
+use zephyr::raw::GPIO_OUTPUT_ACTIVE;
+use zephyr::time::{ Duration, sleep };
+
+#[no_mangle]
+extern "C" fn rust_main() {
+    unsafe { zephyr::set_logger().unwrap(); }
+
+    warn!("Starting blinky");
+
+    do_blink();
+}
+
+#[cfg(dt = "aliases::led0")]
+fn do_blink() {
+    warn!("Inside of blinky");
+
+    let mut led0 = zephyr::devicetree::aliases::led0::get_instance().unwrap();
+    let mut gpio_token = unsafe { zephyr::device::gpio::GpioToken::get_instance().unwrap() };
+
+    if !led0.is_ready() {
+        warn!("LED is not ready");
+        loop {
+        }
+    }
+
+    unsafe { led0.configure(&mut gpio_token, GPIO_OUTPUT_ACTIVE); }
+    let duration = Duration::millis_at_least(500);
+    loop {
+        unsafe { led0.toggle_pin(&mut gpio_token); }
+        sleep(duration);
+    }
+}
+
+#[cfg(not(dt = "aliases::led0"))]
+fn do_blink() {
+    warn!("No leds configured");
+    loop {
+    }
+}
diff --git a/samples/work-philosophers/CMakeLists.txt b/samples/work-philosophers/CMakeLists.txt
new file mode 100644
index 00000000..e118b2c3
--- /dev/null
+++ b/samples/work-philosophers/CMakeLists.txt
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: Apache-2.0
+
+cmake_minimum_required(VERSION 3.20.0)
+
+find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
+project(work_philosophers)
+
+rust_cargo_application()
diff --git a/samples/work-philosophers/Cargo.toml b/samples/work-philosophers/Cargo.toml
new file mode 100644
index 00000000..af2dcd45
--- /dev/null
+++ b/samples/work-philosophers/Cargo.toml
@@ -0,0 +1,25 @@
+# Copyright (c) 2024 Linaro LTD
+# SPDX-License-Identifier: Apache-2.0
+
+[package]
+# This must be rustapp for now.
+name = "rustapp"
+version = "0.1.0"
+edition = "2021"
+description = "A dining philosophers demo, implemented with Zephyr work queues"
+license = "Apache-2.0 OR MIT"
+
+[lib]
+crate-type = ["staticlib"]
+
+[dependencies]
+zephyr = "0.1.0"
+
+# Dependencies that are used by build.rs.
+[build-dependencies]
+zephyr-build = "0.1.0"
+
+[profile.release]
+debug-assertions = true
+overflow-checks = true
+debug = true
diff --git a/samples/work-philosophers/Kconfig b/samples/work-philosophers/Kconfig
new file mode 100644
index 00000000..d8ffa99d
--- /dev/null
+++ b/samples/work-philosophers/Kconfig
@@ -0,0 +1,56 @@
+# Copyright (c) 2024 Linaro LTD
+# SPDX-License-Identifier: Apache-2.0
+
+mainmenu "Rust Dining Philosophers"
+
+source "Kconfig.zephyr"
+
+choice
+  prompt "Select Synchronization implementation"
+  default SYNC_CHANNEL
+
+  config SYNC_SYS_SEMAPHORE
+    bool "Use sys::Semaphore to synchronize forks"
+    help
+      Use to have the dining philosophers sample use sys::Semaphore, with one per fork, to
+      synchronize.
+
+  config SYNC_SYS_DYNAMIC_SEMAPHORE
+    bool "Use a dynamic sys::Semaphore to synchronize forks"
+    help
+      Use to have the dining philosophers sample use sys::Semaphore, with one per fork, to
+      synchronize. The Semaphores will be dynamically allocated.
+
+  config SYNC_SYS_MUTEX
+    bool "Use sys::Mutex to synchronize forks"
+    help
+      Use to have the dining philosophers sample use sys::Mutex, with one per fork, to
+      synchronize.
+
+  config SYNC_CONDVAR
+    bool "Use sync::Condvar and sync::Mutex to synchronize forks"
+    help
+      Use to have the dining philosophers sample use a single data structure, protected
+      by a sync::Mutex and coordinated with a sync::Condvar, to synchronize.
+
+  config SYNC_CHANNEL
+    bool "Use sync::channel to synchronize forks"
+    help
+      Use to have the dining philosophers sample use a worker thread, communicating via
+      channels to synchronize.
+
+  config SYNC_WORKQUEUE
+    bool "Use workqueues to simulate the philosophers"
+    help
+      Use workqueues to simulate the philosophers.
+
+endchoice
+
+if SYNC_CHANNEL
+  config USE_BOUNDED_CHANNELS
+    bool "Should channel sync use bounded channels?"
+    default y
+    help
+      If set, the channel-based communication will use bounded channels with bounds calculated
+      to never block.
+endif
diff --git a/samples/work-philosophers/boards/rpi_pico.conf b/samples/work-philosophers/boards/rpi_pico.conf
new file mode 100644
index 00000000..94c0843a
--- /dev/null
+++ b/samples/work-philosophers/boards/rpi_pico.conf
@@ -0,0 +1,7 @@
+# Copyright (c) 2024 Linaro LTD
+# SPDX-License-Identifier: Apache-2.0
+
+# This board doesn't have a serial console, so use RTT.
+CONFIG_UART_CONSOLE=n
+CONFIG_RTT_CONSOLE=y
+CONFIG_USE_SEGGER_RTT=y
diff --git a/samples/work-philosophers/build.rs b/samples/work-philosophers/build.rs
new file mode 100644
index 00000000..22233f15
--- /dev/null
+++ b/samples/work-philosophers/build.rs
@@ -0,0 +1,9 @@
+// Copyright (c) 2023 Linaro LTD
+// SPDX-License-Identifier: Apache-2.0
+
+// This crate needs access to kconfig variables. This is an example of how to do that. The
+// zephyr-build crate must be a build dependency.
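+//
+// For example (assuming the boolean options declared in this sample's Kconfig), the
+// exported settings can then drive conditional compilation, along the lines of:
+//
+// ```rust
+// #[cfg(CONFIG_SYNC_CHANNEL)]
+// mod sync_channel;
+// ```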
+
+fn main() {
+    zephyr_build::export_bool_kconfig();
+}
diff --git a/samples/work-philosophers/prj.conf b/samples/work-philosophers/prj.conf
new file mode 100644
index 00000000..108393cb
--- /dev/null
+++ b/samples/work-philosophers/prj.conf
@@ -0,0 +1,23 @@
+# Copyright (c) 2024 Linaro LTD
+# SPDX-License-Identifier: Apache-2.0
+
+CONFIG_RUST=y
+CONFIG_RUST_ALLOC=y
+CONFIG_MAIN_STACK_SIZE=8192
+CONFIG_SYSTEM_WORKQUEUE_STACK_SIZE=4096
+
+CONFIG_POLL=y
+
+# CONFIG_DEBUG=y
+CONFIG_DEBUG=n
+# CONFIG_ASSERT=y
+CONFIG_MPU_STACK_GUARD=y
+
+# CONFIG_USERSPACE=y
+
+# Some debugging
+CONFIG_THREAD_MONITOR=y
+CONFIG_THREAD_ANALYZER=y
+CONFIG_THREAD_ANALYZER_USE_PRINTK=y
+CONFIG_THREAD_ANALYZER_AUTO=n
+# CONFIG_THREAD_ANALYZER_AUTO_INTERVAL=15
diff --git a/samples/work-philosophers/sample.yaml b/samples/work-philosophers/sample.yaml
new file mode 100644
index 00000000..a4235fe7
--- /dev/null
+++ b/samples/work-philosophers/sample.yaml
@@ -0,0 +1,15 @@
+sample:
+  description: Philosophers, in Rust
+  name: workq philosophers rust
+common:
+  harness: console
+  harness_config:
+    type: one_line
+    regex:
+      - "All threads done"
+  tags: rust
+  filter: CONFIG_RUST_SUPPORTED
+tests:
+  sample.rust.philosopher:
+    tags: introduction
+    min_ram: 32
diff --git a/samples/work-philosophers/src/async_sem.rs b/samples/work-philosophers/src/async_sem.rs
new file mode 100644
index 00000000..ee72cc86
--- /dev/null
+++ b/samples/work-philosophers/src/async_sem.rs
@@ -0,0 +1,115 @@
+//! Async Semaphore based demo
+//!
+//! This implementation of the dining philosophers problem uses Zephyr semaphores to represent the
+//! forks. Each philosopher dines as per the algorithm a number of times, and when they are all
+//! finished, the test is considered successful. Deadlock will result in the primary thread not
+//! completing.
+//!
+//! Notably, this uses Rc and RefCell along with spawn_local to demonstrate that multiple async
+//! tasks run on the same worker do not need Send. It is just important that write operations on
+//! the RefCell do not `.await` or a panic is likely.
+
+use core::cell::RefCell;
+
+use alloc::{rc::Rc, vec::Vec};
+use zephyr::{
+    kio::{sleep, spawn_local},
+    printkln,
+    sys::sync::Semaphore,
+    time::Forever,
+};
+
+use crate::{get_random_delay, Stats, NUM_PHIL};
+
+/// Number of iterations of each philosopher.
+///
+/// Should be long enough to exercise the test, but if it is too long, the test will time out.
+/// The delay calculated will randomly be between 25 and 775, and there are two waits, so
+/// typically, each "eat" will take about a second.
+const EAT_COUNT: usize = 10;
+
+pub async fn phil() -> Stats {
+    // It is a little tricky to be able to use local workers. We have to have this nested thread
+    // that waits. This is because the Future from `local_phil()` does not implement Send, since it
+    // waits for the philosophers, which are not Send. However, this outer async function does not
+    // hold onto any data that is not Send, and therefore will be Send. Fortunately, this extra
+    // Future is very lightweight.
+    spawn_local(local_phil(), c"phil_wrap").join_async().await
+}
+
+async fn local_phil() -> Stats {
+    // Our overall stats.
+    let stats = Rc::new(RefCell::new(Stats::default()));
+
+    // One fork for each philosopher.
+    let forks: Vec<_> = (0..NUM_PHIL)
+        .map(|_| Rc::new(Semaphore::new(1, 1).unwrap()))
+        .collect();
+
+    // Create all of the philosophers
+    let phils: Vec<_> = (0..NUM_PHIL)
+        .map(|i| {
+            // Determine the two forks. The forks are paired with each philosopher taking the fork of
+            // their number, and the next one, modulo the size of the ring. However, for the last
+            // philosopher, we swap the forks used, since it is necessary to obey a strict ordering
+            // of the locks to avoid deadlocks. (With NUM_PHIL = 6, philosopher 2 takes forks
+            // [2, 3], while philosopher 5 takes [0, 5].)
+            let forks = if i == NUM_PHIL - 1 {
+                [forks[0].clone(), forks[i].clone()]
+            } else {
+                [forks[i].clone(), forks[i + 1].clone()]
+            };
+
+            let phil = one_phil(forks, i, stats.clone());
+            printkln!("Size of child {i}: {}", size_of_val(&phil));
+            spawn_local(phil, c"phil")
+        })
+        .collect();
+
+    // Wait for them all to finish.
+    for p in phils {
+        p.join_async().await;
+    }
+
+    // Leak the stats as a test.
+    // Uncomment this to test that the expect below does truly detect a missed drop.
+    // let _ = Rc::into_raw(stats.clone());
+
+    // At this point, all of the philosophers should have dropped their stats ref, and we should be
+    // able to turn stats back into its value.
+    // This tests that completed work does drop the future.
+    Rc::into_inner(stats)
+        .expect("Failure: a philosopher didn't drop its future")
+        .into_inner()
+}
+
+/// Simulate a single philosopher.
+///
+/// The forks must be ordered with the first fork having the lowest number, otherwise this will
+/// likely deadlock.
+///
+/// This will run for EAT_COUNT times, and then return.
+async fn one_phil(forks: [Rc<Semaphore>; 2], n: usize, stats: Rc<RefCell<Stats>>) {
+    for i in 0..EAT_COUNT {
+        // Acquire the forks.
+        // printkln!("Child {n} take left fork");
+        forks[0].take_async(Forever).await.unwrap();
+        // printkln!("Child {n} take right fork");
+        forks[1].take_async(Forever).await.unwrap();
+
+        // printkln!("Child {n} eating");
+        let delay = get_random_delay(n, 25);
+        sleep(delay).await;
+        stats.borrow_mut().record_eat(n, delay);
+
+        // Release the forks.
+        // printkln!("Child {n} giving up forks");
+        forks[1].give();
+        forks[0].give();
+
+        let delay = get_random_delay(n, 25);
+        sleep(delay).await;
+        stats.borrow_mut().record_think(n, delay);
+
+        printkln!("Philosopher {n} finished eating time {i}");
+    }
+}
diff --git a/samples/work-philosophers/src/hand_worker.rs b/samples/work-philosophers/src/hand_worker.rs
new file mode 100644
index 00000000..84f7e95a
--- /dev/null
+++ b/samples/work-philosophers/src/hand_worker.rs
@@ -0,0 +1,285 @@
+//! Work-queue work, with hand-constructed work.
+//!
+//! This module tries to demonstrate what a hand-crafted system based on work-queues might be like.
+//!
+//! As such, this isn't built around any synchronization mechanisms other than signal. In some
+//! sense, this is structured more like various workers that are coordinating with devices, except
+//! that we will use timeouts for the pauses.
+
+use core::{future::Future, pin::Pin, task::{Context, Poll}};
+
+use alloc::vec;
+use alloc::vec::Vec;
+use zephyr::{kio::{spawn, ContextExt}, printkln, sync::{Arc, SpinMutex}, time::Forever, work::{futures::JoinHandle, Signal, WorkQueue}};
+
+use crate::{get_random_delay, NUM_PHIL};
+pub use crate::Stats;
+
+pub fn phil(workq: &WorkQueue) -> Manager {
+    let wake_manager = Arc::new(Signal::new().unwrap());
+
+    let actions: Vec<_> = (0..NUM_PHIL).map(|_| Arc::new(Action::new(wake_manager.clone()))).collect();
+
+    let phils: Vec<_> = (0..NUM_PHIL)
+        .map(|i| Phil::new(actions[i].clone(), i))
+        .map(|act| spawn(act, workq, c"phil"))
+        .collect();
+
+    Manager {
+        request: wake_manager,
+        actions,
+        phils,
+        forks: vec![ForkState::Idle; NUM_PHIL],
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+enum ForkState {
+    /// Nobody is using the fork.
+    Idle,
+    /// A single philosopher is eating with this fork.
+    Eating,
+    /// Someone is eating, and the numbered philosopher is also waiting to use it.
+    Waiting(usize),
+}
+
+/// The Manager is the main event handler for the work queue system.
+pub struct Manager {
+    actions: Vec<Arc<Action>>,
+    phils: Vec<JoinHandle<Phil>>,
+    /// The signal to wake the manager up.
+    request: Arc<Signal>,
+
+    // The state of each fork.
+    forks: Vec<ForkState>,
+}
+
+impl Future for Manager {
+    type Output = Stats;
+
+    fn poll(mut self: core::pin::Pin<&mut Self>, cx: &mut core::task::Context<'_>) -> core::task::Poll<Self::Output> {
+        // Run through the actions, and see what they have to do.
+        printkln!("Manager running");
+
+        // Clear out the signal before any processing, so it can be set again.
+        self.request.reset();
+
+        // Loop through all of the actions.
+        for i in 0..self.actions.len() {
+            // for (i, act) in self.actions.iter().enumerate() {
+            let act = &self.actions[i];
+            let mut change = None;
+            let mut lock = act.fork.lock().unwrap();
+            match *lock {
+                ForkRequest::Idle => (),
+                ForkRequest::Waiting => (),
+                ForkRequest::Take(f) => {
+                    printkln!("phil {i} wants fork {f}: state {:?}", self.forks[f]);
+                    match self.forks[f] {
+                        ForkState::Idle => {
+                            assert!(change.is_none());
+
+                            // This philosopher can have this fork.
+                            change = Some((f, ForkState::Eating));
+
+                            // And let them know they got it.
+                            *lock = ForkRequest::Idle;
+                            act.wake_phil.raise(-1).unwrap();
+                        }
+                        ForkState::Eating => {
+                            // The fork is busy, but remember who is waiting for it.
+                            assert!(change.is_none());
+                            change = Some((f, ForkState::Waiting(i)));
+                            *lock = ForkRequest::Waiting;
+                        }
+                        ForkState::Waiting(i2) => {
+                            // This indicates the forks were not assigned to the philosophers
+                            // correctly.
+                            panic!("Too many philosophers requesting same fork {i} {i2}");
+                        }
+                    }
+                }
+                ForkRequest::Give(f) => {
+                    printkln!("phil {i} releases fork {f}: state {:?}", self.forks[f]);
+                    match self.forks[f] {
+                        ForkState::Idle => {
+                            panic!("Philosopher returned a fork it did not have");
+                        }
+                        ForkState::Eating => {
+                            // This philosopher was the only one using this fork.
+                            assert!(change.is_none());
+                            change = Some((f, ForkState::Idle));
+
+                            // And let them know that was fine.
+                            *lock = ForkRequest::Idle;
+                            // TODO: Move this raise to after the lock to shorten the time spent
+                            // holding the lock.
+                            act.wake_phil.raise(-2).unwrap();
+                        }
+                        ForkState::Waiting(i2) => {
+                            // We (i) are done with the fork, and can now give it to i2.
+                            // The state changes to Eating to indicate one waiter.
+                            assert!(change.is_none());
+                            change = Some((f, ForkState::Eating));
+
+                            // We inform the current philosopher that we have handled them being
+                            // done with the fork.
+                            *lock = ForkRequest::Idle;
+                            act.wake_phil.raise(-1).unwrap();
+
+                            // And inform the waiter that they can continue.
+                            *self.actions[i2].fork.lock().unwrap() = ForkRequest::Idle;
+                            self.actions[i2].wake_phil.raise(-2).unwrap();
+                        }
+                    }
+                }
+            }
+            drop(lock);
+            if let Some((f, state)) = change {
+                self.forks[f] = state;
+            }
+        }
+
+        // Unless we're completely done, set to wake on our own signal.
+        cx.add_signal(&self.request, Forever);
+        printkln!("Manager pending");
+        Poll::Pending
+    }
+}
+
+/// Captures requests from a philosopher for exclusive use of some forks.
+///
+/// This works by having the philosopher set the request, and the Manager sets this to Idle when it
+/// has been satisfied. Each will signal the other after setting this value.
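+///
+/// As a concrete (illustrative) exchange: a philosopher stores `Take(2)` in `fork` and
+/// raises `wake_manager`; when the manager can grant fork 2 it stores `Idle` back and
+/// raises `wake_phil`, which causes the philosopher's future to be polled again.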
+struct Action {
+    fork: SpinMutex<ForkRequest>,
+    wake_manager: Arc<Signal>,
+    wake_phil: Signal,
+}
+
+impl Action {
+    fn new(wake_manager: Arc<Signal>) -> Self {
+        Self {
+            fork: SpinMutex::new(ForkRequest::Idle),
+            wake_manager,
+            wake_phil: Signal::new().unwrap(),
+        }
+    }
+}
+
+/// A single request concerning a fork.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+enum ForkRequest {
+    /// There is no request pending.
+    Idle,
+    /// Take the given numbered fork.
+    Take(usize),
+    /// Give back the given numbered fork.
+    Give(usize),
+    /// The philosopher has requested a fork, but it is not available.
+    Waiting,
+}
+
+/// The state of a philosopher.
+#[derive(Debug, Copy, Clone)]
+enum PhilState {
+    /// The initial state.
+    Init,
+    /// Wait upon getting the given fork.
+    Take(usize),
+    /// Eating is happening, should wake from a sleep.
+    Eating,
+    /// Waiting on returning the given fork.
+    Give(usize),
+    /// Resting between bites.
+    Resting,
+    /// Done with the whole thing. poll will always return Ready after setting this.
+    Done,
+}
+
+/// Phil represents a single dining philosopher.
+///
+/// Each Phil runs on its own, making requests through its `Action` to take and return the forks.
+struct Phil {
+    /// Which philosopher are we?
+    index: usize,
+    /// Our view of our actions.
+    action: Arc<Action>,
+    /// Current state of this philosopher.
+    state: PhilState,
+    /// How many times have we finished.
+    count: usize,
+    /// The forks we should be using.
+    forks: [usize; 2],
+}
+
+impl Phil {
+    fn new(action: Arc<Action>, index: usize) -> Self {
+        let forks = if index == NUM_PHIL - 1 {
+            [0, index]
+        } else {
+            [index, index + 1]
+        };
+
+        Self {
+            index,
+            action,
+            state: PhilState::Init,
+            count: 0,
+            forks,
+        }
+    }
+}
+
+impl Future for Phil {
+    type Output = ();
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        printkln!("Phil run: {}, {:?}", self.index, self.state);
+
+        match self.state {
+            PhilState::Init => {
+                // Initially, request the first fork to be taken.
+                *self.action.fork.lock().unwrap() = ForkRequest::Take(self.forks[0]);
+                self.action.wake_manager.raise(self.index as i32).unwrap();
+
+                // Next wake event is the signal.
+                cx.add_signal(&self.action.wake_phil, Forever);
+
+                self.state = PhilState::Take(0);
+            }
+            PhilState::Take(f) => {
+                // Check that we were actually supposed to wake up. There shouldn't be spurious
+                // wakeups in this direction.
+                let cur_state = *self.action.fork.lock().unwrap();
+                if cur_state != ForkRequest::Idle {
+                    panic!("State error, taken fork should be idle: {:?}", cur_state);
+                }
+
+                if f == 1 {
+                    // Both are taken. We're eating, and we wait by timeout.
+                    printkln!("Phil {} eating", self.index);
+
+                    self.state = PhilState::Eating;
+                    let delay = get_random_delay(self.index, 25);
+                    cx.add_timeout(delay);
+                } else {
+                    // First fork taken, wait for the second.
+                    printkln!("Setting state to {:?}", ForkRequest::Take(self.forks[1]));
+                    *self.action.fork.lock().unwrap() = ForkRequest::Take(self.forks[1]);
+                    self.action.wake_manager.raise(self.index as i32).unwrap();
+
+                    cx.add_signal(&self.action.wake_phil, Forever);
+
+                    self.state = PhilState::Take(1);
+                }
+            }
+            PhilState::Eating => todo!(),
+            PhilState::Give(_) => todo!(),
+            PhilState::Resting => todo!(),
+            PhilState::Done => todo!(),
+        }
+
+        Poll::Pending
+    }
+}
diff --git a/samples/work-philosophers/src/lib.rs b/samples/work-philosophers/src/lib.rs
new file mode 100644
index 00000000..a3022431
--- /dev/null
+++ b/samples/work-philosophers/src/lib.rs
@@ -0,0 +1,142 @@
+// Copyright (c) 2023 Linaro LTD
+// SPDX-License-Identifier: Apache-2.0
+
+#![no_std]
+// Cargo tries to detect configs that have typos in them. Unfortunately, the Zephyr Kconfig system
+// uses a large number of Kconfigs and there is no easy way to know which ones might conceivably be
+// valid. This prevents a warning about each cfg that is used.
+#![allow(unexpected_cfgs)]
+
+extern crate alloc;
+
+use alloc::vec;
+use alloc::vec::Vec;
+use zephyr::{
+    kio::spawn,
+    kobj_define, printkln,
+    sync::Arc,
+    sys::uptime_get,
+    time::{Duration, Tick},
+    work::WorkQueueBuilder,
+};
+
+mod async_sem;
+mod hand_worker;
+
+/// How many philosophers. There will be the same number of forks.
+///
+/// For async, this can typically be quite a bit larger than the number of threads possible.
+const NUM_PHIL: usize = 6;
+//const NUM_PHIL: usize = 16;
+
+/// Size of the stack for the work queue.
+const WORK_STACK_SIZE: usize = 2048;
+
+// The dining philosophers problem is a simple example of cooperation between multiple threads.
+// This implementation demonstrates a few ways that Zephyr's work-queues can be used to simulate
+// this problem.
+
+#[no_mangle]
+extern "C" fn rust_main() {
+    printkln!(
+        "Async/work-queue dining philosophers: {}",
+        zephyr::kconfig::CONFIG_BOARD
+    );
+    printkln!("Time tick: {}", zephyr::time::SYS_FREQUENCY);
+
+    // Create the work queue to run this.
+    let worker = Arc::new(
+        WorkQueueBuilder::new()
+            .set_priority(1)
+            .start(WORK_STACK.init_once(()).unwrap()),
+    );
+
+    // In addition, create a lower priority worker.
+    let lower_worker = Arc::new(
+        WorkQueueBuilder::new()
+            .set_priority(5)
+            .start(LOWER_WORK_STACK.init_once(()).unwrap()),
+    );
+
+    // It is important that work queues are not dropped, as they are persistent objects in the
+    // Zephyr world.
+    let _ = Arc::into_raw(lower_worker.clone());
+    let _ = Arc::into_raw(worker.clone());
+
+    // Run the by-hand worker.
+    printkln!("Running hand-worker test");
+    let work = hand_worker::phil(&lower_worker);
+    let handle = spawn(work, &worker, c"hand-work");
+    let stats = handle.join();
+    printkln!("Done with hand-worker");
+    stats.show();
+
+    // Run the async semaphore based worker.
+    printkln!("Running 'async-sem' test");
+    // let handle = spawn(async_sem::phil(), &worker, c"async-sem");
+    let work = async_sem::phil();
+    printkln!("size of async-sem worker: {}", size_of_val(&work));
+    let handle = spawn(work, &worker, c"async-sem");
+    let stats = handle.join();
+    printkln!("Done with 'async-sem' test");
+    stats.show();
+
+    printkln!("All threads done");
+}
+
+kobj_define! {
+    static WORK_STACK: ThreadStack<WORK_STACK_SIZE>;
+    static LOWER_WORK_STACK: ThreadStack<WORK_STACK_SIZE>;
+}
+
+/// Get a random delay, based on the ID of this user, and the current uptime.
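+///
+/// Worked example (illustrative numbers): with an uptime of 12345 ticks, id 2, and
+/// period 25, `delay = (12345 / 100 * 3) & 0x1f = 17`, so the result is
+/// `(17 + 1) * 25 = 450` ms.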
+fn get_random_delay(id: usize, period: usize) -> Duration {
+    let tick = (uptime_get() & (usize::MAX as i64)) as usize;
+    let delay = (tick / 100 * (id + 1)) & 0x1f;
+
+    // Use one greater to be sure to never get a delay of zero.
+    Duration::millis_at_least(((delay + 1) * period) as Tick)
+}
+
+/// Instead of just printing out so much information that the data just scrolls by, gather
+/// statistics.
+// #[derive(Default)]
+pub struct Stats {
+    /// How many times each philosopher has gone through the loop.
+    count: Vec<usize>,
+    /// How much time each philosopher has spent eating.
+    eating: Vec<u16>,
+    /// How much time each philosopher has spent thinking.
+    thinking: Vec<u16>,
+}
+
+// Implement Default manually, so that the Vecs are created with NUM_PHIL entries rather than
+// empty.
+impl Default for Stats {
+    fn default() -> Self {
+        Self {
+            count: vec![0; NUM_PHIL],
+            eating: vec![0; NUM_PHIL],
+            thinking: vec![0; NUM_PHIL],
+        }
+    }
+}
+
+impl Stats {
+    fn record_eat(&mut self, index: usize, time: Duration) {
+        self.eating[index] += time.to_millis() as u16;
+    }
+
+    fn record_think(&mut self, index: usize, time: Duration) {
+        self.thinking[index] += time.to_millis() as u16;
+        self.count[index] += 1;
+    }
+
+    fn show(&self) {
+        printkln!(
+            "c:{:?}, e:{:?}, t:{:?}",
+            self.count,
+            self.eating,
+            self.thinking
+        );
+    }
+}
diff --git a/zephyr-build/Cargo.toml b/zephyr-build/Cargo.toml
index c73a95e6..409f7b93 100644
--- a/zephyr-build/Cargo.toml
+++ b/zephyr-build/Cargo.toml
@@ -15,3 +15,10 @@ Provides utilities for accessing Kconfig and devicetree information.
 # used by the core Zephyr tree, but are needed by zephyr applications.
 [dependencies]
 regex = "1.10.3"
+pest = "2.6"
+pest_derive = "2.6"
+quote = "1.0"
+proc-macro2 = "1.0.86"
+serde = { version = "1.0", features = ["derive"] }
+serde_yaml_ng = "0.10"
+anyhow = "1.0.89"
diff --git a/zephyr-build/src/devicetree.rs b/zephyr-build/src/devicetree.rs
new file mode 100644
index 00000000..dda4bc11
--- /dev/null
+++ b/zephyr-build/src/devicetree.rs
@@ -0,0 +1,291 @@
+//! Incorporating Zephyr's devicetree into Rust.
+//!
+//! Zephyr depends fairly heavily on the devicetree for configuration. The build system reads
+//! multiple DTS files, and coalesces this into a single devicetree. This tree is output in a few
+//! different ways:
+//!
+//! - Canonical DTS. There is a single DTS file (`build/zephyr/zephyr.dts`) that contains the final
+//!   tree, but still in DTS format (the DTB file would have information discarded).
+//!
+//! - Generated. The C header `devicetree_generated.h` contains all of the definitions. This isn't
+//!   a particularly friendly file to read or parse, but it does have one piece of information that
+//!   is not represented anywhere else: the mapping between devicetree nodes and their "ORD" index.
+//!   The device nodes in the system are indexed by this number, and we need this in order to be
+//!   able to reference the nodes from Rust.
+//!
+//! Beyond the ORD field, it seems easier to deal with the DTS file itself. Parsing is fairly
+//! straightforward, as it is a subset of the DTS format, and we only have to be able to deal with
+//! the files that are generated by the Zephyr build process.
+
+// TODO: Turn this off.
+#![allow(dead_code)]
+
+use ordmap::OrdMap;
+use std::{cell::RefCell, collections::BTreeMap, path::Path, rc::Rc};
+
+mod augment;
+mod ordmap;
+mod output;
+mod parse;
+
+pub use augment::{Augment, load_augments};
+
+/// Representation of a parsed device tree.
+pub struct DeviceTree {
+    /// The root of the tree.
+    root: Rc<Node>,
+    /// All of the labels.
+    /// Note that this is a BTree so that the output will be deterministic.
+    labels: BTreeMap<String, Rc<Node>>,
+}
+
+// A single node in a [`DeviceTree`].
+pub struct Node {
+    // The name of the node itself.
+    name: String,
+    // The full path of this node in the tree.
+    path: String,
+    // The "route" is the path, but still as separate entries.
+    route: Vec<String>,
+    // The ord index in this particular Zephyr build.
+    ord: usize,
+    // Labels attached to this node.
+    labels: Vec<String>,
+    // Any properties set in this node.
+    properties: Vec<Property>,
+    // Children nodes.
+    children: Vec<Rc<Node>>,
+    // The parent. Should be non-null except at the root node.
+    parent: RefCell<Option<Rc<Node>>>,
+}
+
+#[derive(Debug)]
+pub struct Property {
+    pub name: String,
+    pub value: Vec<Value>,
+}
+
+// Although the real devicetree flattens all of these into bytes, Zephyr takes advantage of them at
+// a slightly higher level.
+#[derive(Debug)]
+pub enum Value {
+    Words(Vec<Word>),
+    Bytes(Vec<u8>),
+    Phandle(Phandle),
+    String(String),
+}
+
+/// A phandle is a named reference to a labeled part of the DT. We resolve this by making the
+/// reference optional, and filling them in afterwards.
+pub struct Phandle {
+    /// The label of our target. Keep this because it may be useful to know which label was used,
+    /// as nodes often have multiple labels.
+    name: String,
+    /// The inside of the node, inner mutability so this can be looked up and cached.
+    node: RefCell<Option<Rc<Node>>>,
+}
+
+#[derive(Debug)]
+pub enum Word {
+    Number(u32),
+    Phandle(Phandle),
+}
+
+impl DeviceTree {
+    /// Decode the `zephyr.dts` and `devicetree_generated.h` files from the build and build an
+    /// internal representation of the devicetree itself.
+    pub fn new<P1: AsRef<Path>, P2: AsRef<Path>>(dts_path: P1, dt_gen: P2) -> DeviceTree {
+        let ords = OrdMap::new(dt_gen);
+
+        let dts = std::fs::read_to_string(dts_path)
+            .expect("Reading zephyr.dts file");
+        let dt = parse::parse(&dts, &ords);
+
+        // Walk the node tree, fixing any phandles to include their reference.
+        dt.root.phandle_walk(&dt.labels);
+
+        // Walk the node tree, setting each node's parent appropriately.
+        dt.root.parent_walk();
+
+        dt
+    }
+}
+
+impl Node {
+    fn phandle_walk(&self, labels: &BTreeMap<String, Rc<Node>>) {
+        for prop in &self.properties {
+            for value in &prop.value {
+                value.phandle_walk(labels);
+            }
+        }
+        for child in &self.children {
+            child.phandle_walk(labels);
+        }
+    }
+
+    fn parent_walk(self: &Rc<Self>) {
+        for child in &self.children {
+            *(child.parent.borrow_mut()) = Some(self.clone());
+            child.parent_walk()
+        }
+    }
+
+    fn is_compatible(&self, name: &str) -> bool {
+        self.properties
+            .iter()
+            .filter(|p| p.name == "compatible")
+            .flat_map(|prop| prop.value.iter())
+            .any(|v| matches!(v, Value::String(vn) if name == vn))
+    }
+
+    /// A richer compatible test. Walks a series of names, in reverse. Any that are "Some(x)" must
+    /// be compatible with "x" at that level.
+    fn compatible_path(&self, path: &[Option<&str>]) -> bool {
+        if let Some(first) = path.first() {
+            if matches!(first, Some(name) if !self.is_compatible(name)) {
+                return false;
+            }
+
+            // Walk up the tree with the remainder of the path.
+            if let Some(parent) = self.parent.borrow().as_ref() {
+                parent.compatible_path(&path[1..])
+            } else {
+                // We've run out of nodes, so this is considered not matching.
+                false
+            }
+        } else {
+            // The empty path always matches.
+            true
+        }
+    }
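+
+    // As an illustrative (hypothetical) use, requiring that a node's parent be compatible
+    // with "fixed-partitions", while accepting any compatible on the node itself, would be:
+    //
+    //     node.compatible_path(&[None, Some("fixed-partitions")])
+
+    /// Returns `true` if there is a property with this name.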
+    fn has_prop(&self, name: &str) -> bool {
+        self.properties.iter().any(|p| p.name == name)
+    }
+
+    /// Returns the slice of values of a property with this name as `Some`, or `None` if the
+    /// property does not exist.
+    fn get_property(&self, name: &str) -> Option<&[Value]> {
+        self.properties
+            .iter()
+            .find_map(|p| if p.name == name { Some(p.value.as_slice()) } else { None })
+    }
+
+    /// Attempt to retrieve the named property, as a single entry of Words.
+    fn get_words(&self, name: &str) -> Option<&[Word]> {
+        self.get_property(name)
+            .and_then(|p| {
+                match p {
+                    &[Value::Words(ref w)] => Some(w.as_ref()),
+                    _ => None,
+                }
+            })
+    }
+
+    /// Get a property that consists of a single number.
+    fn get_number(&self, name: &str) -> Option<u32> {
+        self.get_words(name)
+            .and_then(|p| {
+                if let &[Word::Number(n)] = p {
+                    Some(n)
+                } else {
+                    None
+                }
+            })
+    }
+
+    /// Get a property that consists of multiple numbers.
+    fn get_numbers(&self, name: &str) -> Option<Vec<u32>> {
+        let mut result = vec![];
+        for word in self.get_words(name)? {
+            if let Word::Number(n) = word {
+                result.push(*n);
+            } else {
+                return None;
+            }
+        }
+        Some(result)
+    }
+
+    /// Get a property that is a single string.
+    fn get_single_string(&self, name: &str) -> Option<&str> {
+        self.get_property(name)
+            .and_then(|p| {
+                if let &[Value::String(ref text)] = p {
+                    Some(text.as_ref())
+                } else {
+                    None
+                }
+            })
+    }
+}
+
+impl Value {
+    fn phandle_walk(&self, labels: &BTreeMap<String, Rc<Node>>) {
+        match self {
+            Self::Phandle(ph) => ph.phandle_resolve(labels),
+            Self::Words(words) => {
+                for w in words {
+                    if let Word::Phandle(ph) = w {
+                        ph.phandle_resolve(labels);
+                    }
+                }
+            }
+            _ => (),
+        }
+    }
+}
+
+impl Phandle {
+    /// Construct a phandle that is unresolved.
+    pub fn new(name: String) -> Self {
+        Self {
+            name,
+            node: RefCell::new(None),
+        }
+    }
+
+    /// Resolve this phandle, with the given label for lookup.
+    fn phandle_resolve(&self, labels: &BTreeMap<String, Rc<Node>>) {
+        // If already resolved, just return.
+        if self.node.borrow().is_some() {
+            return;
+        }
+
+        let node = labels.get(&self.name).cloned()
+            .expect("Missing phandle");
+        *self.node.borrow_mut() = Some(node);
+    }
+
+    /// Get the child node, panicking if it wasn't resolved properly.
+    fn node_ref(&self) -> Rc<Node> {
+        self.node.borrow().as_ref().unwrap().clone()
+    }
+}
+
+impl Word {
+    pub fn as_number(&self) -> Option<u32> {
+        match self {
+            Self::Number(n) => Some(*n),
+            _ => None,
+        }
+    }
+
+    pub fn get_phandle(&self) -> Option<&Phandle> {
+        match self {
+            Word::Phandle(ph) => Some(ph),
+            _ => None,
+        }
+    }
+}
+
+// To avoid recursion, the debug printer for Phandle just prints the name.
+impl std::fmt::Debug for Phandle {
+    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
+        fmt
+            .debug_struct("Phandle")
+            .field("name", &self.name)
+            .finish_non_exhaustive()
+    }
+}
diff --git a/zephyr-build/src/devicetree/augment.rs b/zephyr-build/src/devicetree/augment.rs
new file mode 100644
index 00000000..a38b0b23
--- /dev/null
+++ b/zephyr-build/src/devicetree/augment.rs
@@ -0,0 +1,398 @@
+//! Support for augmenting the device tree.
+//!
+//! There are various aspects of the device tree in Zephyr whose semantics are only indirectly
+//! defined by the behavior of C code. Rather than trying to decipher this at build time, we will
+//! use one or more yaml files that describe aspects of the device tree.
+//!
+//! This module is responsible for the format of this config file, and the parsed contents will be
+//! used to generate the [`Augment`] objects that will do the actual augmentation of the generated
+//! device tree.
+//!
+//! Each augment is described by a top-level yaml element in an array.
+
+use std::{fs::File, path::Path};
+
+use anyhow::Result;
+use proc_macro2::{Ident, TokenStream};
+use quote::{format_ident, quote};
+use serde::{Deserialize, Serialize};
+
+use crate::devicetree::{output::dt_to_lower_id, Value, Word};
+
+use super::{DeviceTree, Node};
+
+/// This action is given to each node in the device tree, and it is given a chance to return
+/// additional code to be included in the module associated with that entry. These are all
+/// assembled together and included in the final generated devicetree.rs.
+pub trait Augment {
+    /// The default implementation checks if this node matches and calls a generator if it does, or
+    /// does nothing if not.
+    fn augment(&self, node: &Node, tree: &DeviceTree) -> TokenStream {
+        // If there is a status field present, and it is not set to "okay", don't augment this node.
+        if let Some(status) = node.get_single_string("status") {
+            if status != "okay" {
+                return TokenStream::new();
+            }
+        }
+        if self.is_compatible(node) {
+            self.generate(node, tree)
+        } else {
+            TokenStream::new()
+        }
+    }
+
+    /// A query as to whether this node is compatible with this augment. A simple case might check
+    /// the node's compatible field, but it may also make sense to check a parent's compatible.
+    fn is_compatible(&self, node: &Node) -> bool;
+
+    /// A generator to be called when we are compatible.
+    fn generate(&self, node: &Node, tree: &DeviceTree) -> TokenStream;
+}
+
+/// A top level augmentation.
+///
+/// This top level augmentation describes how to match a given node within the device tree, and
+/// what action to take when it matches.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Augmentation {
+    /// A name for this augmentation. Used for diagnostic purposes.
+    name: String,
+    /// What to match. This is an array, and all must match for a given node to be considered.
+    /// This does mean that if this is an empty array, it will match on every node.
+    rules: Vec<Rule>,
+    /// What to do when a given node matches.
+    actions: Vec<Action>,
+}
+
+impl Augment for Augmentation {
+    fn is_compatible(&self, node: &Node) -> bool {
+        self.rules.iter().all(|n| n.is_compatible(node))
+    }
+
+    fn generate(&self, node: &Node, tree: &DeviceTree) -> TokenStream {
+        let name = format_ident!("{}", dt_to_lower_id(&self.name));
+        let actions = self.actions.iter().map(|a| a.generate(&name, node, tree));
+
+        quote! {
+            #(#actions)*
+        }
+    }
+}
+
+/// A matching rule.
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(tag = "type", rename_all = "snake_case", content = "value")]
+pub enum Rule {
+    /// A set of "or" matches.
+    Or(Vec<Rule>),
+    /// A set of "and" matches. Not needed at the top level, as the top level vec is an implicit
+    /// and.
+    And(Vec<Rule>),
+    /// Matches if the node has the given property.
+    HasProp(String),
+    /// Matches if this node has one of the listed compatible strings. The 'level' property
+    /// indicates how many levels up in the tree. Zero means match the current node, 1 means the
+    /// parent node, and so on.
+    Compatible {
+        names: Vec<String>,
+        level: usize,
+    },
+    /// Matches at the root of the tree.
+    Root,
+}
+
+impl Rule {
+    fn is_compatible(&self, node: &Node) -> bool {
+        match self {
+            Rule::Or(rules) => rules.iter().any(|n| n.is_compatible(node)),
+            Rule::And(rules) => rules.iter().all(|n| n.is_compatible(node)),
+            Rule::HasProp(name) => node.has_prop(name),
+            Rule::Compatible { names, level } => parent_compatible(node, names, *level),
+            Rule::Root => node.parent.borrow().is_none(),
+        }
+    }
+}
+
+/// Determine if a node is compatible, looking `levels` levels up in the tree, where 0 means this
+/// node.
+fn parent_compatible(node: &Node, names: &[String], level: usize) -> bool {
+    // Writing this recursively simplifies the borrowing a lot. Otherwise, we'd have to clone the
+    // RCs. Our choice is the extra clone, or keeping the borrowed values on the stack. This code
+    // runs on the host, so the stack is easier.
+    if level == 0 {
+        names.iter().any(|n| node.is_compatible(n))
+    } else {
+        if let Some(parent) = node.parent.borrow().as_ref() {
+            parent_compatible(parent, names, level - 1)
+        } else {
+            false
+        }
+    }
+}
+
+/// An action to perform.
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(tag = "type", rename_all = "snake_case", content = "value")]
+pub enum Action {
+    /// Generate an "instance" with a specific device name.
+    Instance {
+        /// Where to get the raw device information.
+        raw: RawInfo,
+        /// The name of the full path (within the zephyr-sys crate) for the wrapper node for this
+        /// device.
+        device: String,
+        /// A Kconfig option to allow the instances to only be present when said driver is compiled
+        /// in.
+        kconfig: Option<String>,
+    },
+    /// Generate a getter for a gpio assignment property.
+    GpioPins {
+        /// The name of the property holding the pins.
+        property: String,
+        /// The name of the getter function.
+        getter: String,
+    },
+    /// Generate all of the labels as its own node.
+    Labels,
+}
+
+impl Action {
+    fn generate(&self, _name: &Ident, node: &Node, tree: &DeviceTree) -> TokenStream {
+        match self {
+            Action::Instance { raw, device, kconfig } => {
+                raw.generate(node, device, kconfig.as_deref())
+            }
+            Action::GpioPins { property, getter } => {
+                let upper_getter = getter.to_uppercase();
+                let getter = format_ident!("{}", getter);
+                // TODO: This isn't actually right, these unique values should be based on the pin
+                // definition so that we'll get a compile error if two parts of the DT reference the
+                // same pin.
+
+                let pins = node.get_property(property).unwrap();
+                let npins = pins.len();
+
+                let uniques: Vec<_> = (0..npins).map(|n| {
+                    format_ident!("{}_UNIQUE_{}", upper_getter, n)
+                }).collect();
+
+                let pins = pins
+                    .iter()
+                    .zip(uniques.iter())
+                    .map(|(pin, unique)| decode_gpios_gpio(unique, pin));
+
+                let unique_defs = uniques.iter().map(|u| {
+                    quote! {
+                        static #u: crate::device::Unique = crate::device::Unique::new();
+                    }
+                });
+
+                quote! {
+                    #(#unique_defs)*
+                    pub fn #getter() -> [Option<crate::device::gpio::GpioPin>; #npins] {
+                        [#(#pins),*]
+                    }
+                }
+            }
+            Action::Labels => {
+                let nodes = tree.labels.iter().map(|(k, v)| {
+                    let name = dt_to_lower_id(k);
+                    let path = v.route_to_rust();
+                    quote! {
+                        pub mod #name {
+                            pub use #path::*;
+                        }
+                    }
+                });
+
+                quote! {
+                    // This does assume the devicetree doesn't have a "labels" node at the root.
+                    pub mod labels {
+                        /// All of the labels in the device tree. The device tree compiler
+                        /// enforces that these are unique, allowing references such as
+                        /// `zephyr::devicetree::labels::labelname::get_instance()`.
+                        #(#nodes)*
+                    }
+                }
+            }
+        }
+    }
+}
+
+/// Decode a single gpio entry.
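+///
+/// For example, a devicetree assignment like `gpios = <&gpio0 13 GPIO_ACTIVE_LOW>;` arrives
+/// here as one phandle word followed by two number words; the two numbers become the extra
+/// arguments to `GpioPin::new`.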
+fn decode_gpios_gpio(unique: &Ident, entry: &Value) -> TokenStream {
+    let entry = if let Value::Words(w) = entry {
+        w
+    } else {
+        panic!("gpios list is not list of <&gpionnn aa bbb>");
+    };
+    if entry.len() != 3 {
+        panic!("gpios currently must be three items");
+    }
+    let gpio_route = entry[0].get_phandle().unwrap().node_ref().route_to_rust();
+    let args: Vec<u32> = entry[1..].iter().map(|n| n.as_number().unwrap()).collect();
+
+    quote! {
+        // TODO: Don't hard code this but put in yaml file.
+        unsafe {
+            crate::device::gpio::GpioPin::new(
+                &#unique,
+                #gpio_route :: get_instance_raw(),
+                #(#args),*)
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(tag = "type", rename_all = "snake_case", content = "value")]
+pub enum RawInfo {
+    /// Get the raw device directly from this node.
+    Myself {
+        args: Vec<ArgInfo>,
+    },
+    /// Get the reference from a parent of this node, at a given level.
+    Parent {
+        /// How many levels to look up. 0 would refer to this node (but would also be an error).
+        level: usize,
+        args: Vec<ArgInfo>,
+    },
+    /// Get the raw device from a phandle property. Additional parameters in the phandle will be
+    /// passed as additional arguments to the `new` constructor on the wrapper type.
+    Phandle(String),
+}
+
+impl RawInfo {
+    fn generate(&self, node: &Node, device: &str, kconfig: Option<&str>) -> TokenStream {
+        let device_id = str_to_path(device);
+        let kconfig = if let Some(name) = kconfig {
+            let name = format_ident!("{}", name);
+            quote! {
+                #[cfg(#name)]
+            }
+        } else {
+            quote! {}
+        };
+
+        match self {
+            Self::Myself { args } => {
+                let get_args = args.iter().map(|arg| arg.args(node));
+
+                let ord = node.ord;
+                let rawdev = format_ident!("__device_dts_ord_{}", ord);
+                quote! {
+                    /// Get the raw `const struct device *` of the device tree generated node.
+                    #kconfig
+                    pub unsafe fn get_instance_raw() -> *const crate::raw::device {
+                        &crate::raw::#rawdev
+                    }
+
+                    #kconfig
+                    static UNIQUE: crate::device::Unique = crate::device::Unique::new();
+                    #kconfig
+                    pub fn get_instance() -> Option<#device_id> {
+                        unsafe {
+                            let device = get_instance_raw();
+                            #device_id::new(&UNIQUE, device, #(#get_args),*)
+                        }
+                    }
+                }
+            }
+            Self::Phandle(pname) => {
+                let words = node.get_words(pname).unwrap();
+                // We assume that elt 0 is the phandle, and that the rest are numbers.
+                let target = if let Word::Phandle(handle) = &words[0] {
+                    handle.node_ref()
+                } else {
+                    panic!("phandle property {:?} does not start with a phandle", pname);
+                };
+
+                // TODO: We would try to correlate with the parent node's notion of number of cells,
+                // and try to handle cases where there is more than one reference. It is unclear
+                // when this will be needed.
+                let args: Vec<u32> = words[1..].iter().map(|n| n.as_number().unwrap()).collect();
+
+                let target_route = target.route_to_rust();
+
+                quote! {
+                    #kconfig
+                    static UNIQUE: crate::device::Unique = crate::device::Unique::new();
+                    #kconfig
+                    pub fn get_instance() -> Option<#device_id> {
+                        unsafe {
+                            let device = #target_route :: get_instance_raw();
+                            #device_id::new(&UNIQUE, device, #(#args),*)
+                        }
+                    }
+                }
+            }
+            Self::Parent { level, args } => {
+                let get_args = args.iter().map(|arg| arg.args(node));
+
+                assert!(*level > 0);
+                let mut path = quote! {super};
+                for _ in 1..*level {
+                    path = quote! { #path :: super };
+                }
+
+                quote! {
+                    #kconfig
+                    static UNIQUE: crate::device::Unique = crate::device::Unique::new();
+                    #kconfig
+                    pub fn get_instance() -> Option<#device_id> {
+                        unsafe {
+                            let device = #path :: get_instance_raw();
+                            #device_id::new(&UNIQUE, device, #(#get_args),*)
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+/// Information about where to get constructor properties for arguments.
+///
+/// At this point, we assume these all come from the current node.
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(tag = "type", rename_all = "snake_case", content = "value")]
+pub enum ArgInfo {
+    /// The arguments come from a 'reg' property.
+    Reg,
+    /// A count of the number of child nodes.
+    ChildCount,
+}
+
+impl ArgInfo {
+    /// Extract the properties for the argument, assembling the arguments that should be passed in.
+    fn args(&self, node: &Node) -> TokenStream {
+        match self {
+            ArgInfo::Reg => {
+                let reg = node.get_numbers("reg").unwrap();
+                quote! {
+                    #(#reg),*
+                }
+            }
+            ArgInfo::ChildCount => {
+                let count = node.children.len();
+                quote! {
+                    #count
+                }
+            }
+        }
+    }
+}
+
+/// Split a path given by a user into a token stream.
+fn str_to_path(path: &str) -> TokenStream {
+    let names = path.split("::").map(|n| format_ident!("{}", n));
+    quote! {
+        #(#names)::*
+    }
+}
+
+/// Load a file of the given name.
+pub fn load_augments<P: AsRef<Path>>(name: P) -> Result<Vec<Augmentation>> {
+    let fd = File::open(name)?;
+    let augs: Vec<Augmentation> = serde_yaml_ng::from_reader(fd)?;
+    Ok(augs)
+}
diff --git a/zephyr-build/src/devicetree/dts.pest b/zephyr-build/src/devicetree/dts.pest
new file mode 100644
index 00000000..8a521580
--- /dev/null
+++ b/zephyr-build/src/devicetree/dts.pest
@@ -0,0 +1,77 @@
+// Device Tree Source file
+//
+// This is a pest parser for a subset of the DTS
+// format that will be seen by the output of dtc.
+
+file = _{ SOI ~ header ~ node ~ EOI }
+
+header = _{ "/dts-v1/" ~ ";" }
+
+node = {
+    node_path ~
+    "{" ~
+    entry* ~
+    "}" ~ ";"
+}
+
+node_path = _{
+    (label ~ ":")* ~
+    ("/" | nodename)
+}
+
+entry = _{
+    property |
+    node
+}
+
+property = {
+    (nodename ~ "=" ~ values ~ ";") |
+    (nodename ~ ";")
+}
+
+values = _{ value ~ ("," ~ value)* }
+value = _{ string | words | bytes | phandle }
+
+words = {
+    "<" ~
+    (number | phandle)+ ~
+    ">"
+}
+
+bytes = {
+    "[" ~
+    plain_hex_number+ ~
+    "]"
+}
+
+number = _{ decimal_number | hex_number }
+
+decimal_number = @{
+    ('1'..'9') ~
+    ASCII_DIGIT*
+}
+
+hex_number = @{
+    ("0x" | "0X") ~
+    ASCII_HEX_DIGIT+
+}
+
+plain_hex_number = @{
+    ASCII_HEX_DIGIT+
+}
+
+// Simple strings, no escapes or such.
+string = @{
+    "\"" ~
+    (!("\"" | "\n") ~ ANY)* ~
+    "\""
+}
+
+phandle = @{ "&" ~ label }
+
+label = @{ ASCII_ALPHA ~ (ASCII_ALPHANUMERIC | "_")* }
+
+nodename = @{
+    (ASCII_ALPHANUMERIC | "_" | "," | "." | "?" | "-" | "@" | "#")+
+}
+
+WHITESPACE = _{ " " | "\n" | "\t" }
diff --git a/zephyr-build/src/devicetree/ordmap.rs b/zephyr-build/src/devicetree/ordmap.rs
new file mode 100644
index 00000000..02bc0ec5
--- /dev/null
+++ b/zephyr-build/src/devicetree/ordmap.rs
@@ -0,0 +1,41 @@
+//! Devicetree ordmap
+//!
+//! The OrdMap provides a mapping between nodes on the devicetree, and their "ord" index.
+
+use std::{collections::BTreeMap, fs::File, io::{BufRead, BufReader}, path::Path, str::FromStr};
+
+use regex::Regex;
+
+pub struct OrdMap(pub BTreeMap<String, usize>);
+
+impl OrdMap {
+    pub fn new<P: AsRef<Path>>(path: P) -> OrdMap {
+        let mut result = BTreeMap::new();
+
+        let path_re = Regex::new(r#"^#define DT_(.*)_PATH "(.*)"$"#).unwrap();
+        let ord_re = Regex::new(r#"^#define DT_(.*)_ORD (.*)$"#).unwrap();
+
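+        // These regexes match pairs of lines in devicetree_generated.h of the form
+        // (node name and path here are illustrative):
+        //
+        //   #define DT_N_S_leds_S_led_0_PATH "/leds/led_0"
+        //   #define DT_N_S_leds_S_led_0_ORD 123
+
+        // The last C name seen.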
+        let mut c_name = "".to_string();
+        let mut dt_path = "".to_string();
+
+        let fd = File::open(path)
+            .expect("Opening devicetree_generated.h");
+        for line in BufReader::new(fd).lines() {
+            let line = line.expect("Reading from devicetree_generated.h");
+
+            if let Some(caps) = path_re.captures(&line) {
+                // println!("Path: {:?} => {:?}", &caps[1], &caps[2]);
+                c_name = caps[1].to_string();
+                dt_path = caps[2].to_string();
+            } else if let Some(caps) = ord_re.captures(&line) {
+                // println!("Ord: {:?} => {:?}", &caps[1], &caps[2]);
+                let ord = usize::from_str(&caps[2]).unwrap();
+                assert_eq!(caps[1].to_string(), c_name);
+                result.insert(dt_path.clone(), ord);
+            }
+        }
+
+        OrdMap(result)
+    }
+}
diff --git a/zephyr-build/src/devicetree/output.rs b/zephyr-build/src/devicetree/output.rs
new file mode 100644
index 00000000..7eb9239a
--- /dev/null
+++ b/zephyr-build/src/devicetree/output.rs
@@ -0,0 +1,213 @@
+//! Outputting the devicetree into Rust.
+
+// We output the device tree in a module tree in Rust that mirrors the DTS tree. Devicetree names
+// are made into valid Rust identifiers by the simple rule that invalid characters are replaced with
+// underscores.
+//
+// The actual output is somewhat specialized, and driven by the data, and the compatible values.
+// Support for particular devices should also be added to the device tree here, so that the nodes
+// make sense for that device, and that there are general accessors that return wrapped node types.
+
+use std::io::Write;
+
+use anyhow::Result;
+use proc_macro2::{Ident, TokenStream};
+use quote::{format_ident, quote};
+
+use super::{augment::Augment, DeviceTree, Node, Property, Value, Word};
+
+impl DeviceTree {
+    /// Generate a TokenStream for the Rust representation of this device tree.
+    pub fn to_tokens(&self, augments: &[Box<dyn Augment>]) -> TokenStream {
+        // Root is a little special. Since we don't want a module for this (it will be provided
+        // above where it is included, so it can get documentation and attributes), we use None for
+        // the name.
+        self.node_walk(self.root.as_ref(), None, &augments)
+    }
+
+    // Write, to the given writer, CFG lines so that Rust code can conditionalize based on the DT.
+    pub fn output_node_paths<W: Write>(&self, write: &mut W) -> Result<()> {
+        self.root.as_ref().output_path_walk(write, None)?;
+
+        // Also, output all of the labels. Technically, this depends on the labels augment being
+        // present.
+        writeln!(write, "cargo:rustc-cfg=dt=\"labels\"")?;
+        for label in self.labels.keys() {
+            writeln!(write, "cargo:rustc-cfg=dt=\"labels::{}\"", fix_id(label))?;
+        }
+        Ok(())
+    }
+
+    fn node_walk(&self, node: &Node, name: Option<&str>, augments: &[Box<dyn Augment>]) -> TokenStream {
+        let children = node.children.iter().map(|child| {
+            self.node_walk(child.as_ref(), Some(&child.name), augments)
+        });
+        // Simplistic first pass, turn the properties into constants of the formatted text of the
+        // property.
+        let props = node.properties.iter().map(|prop| {
+            self.property_walk(prop)
+        });
+        let ord = node.ord;
+
+        // Open the parent as a submodule. This is the same as 'super', so not particularly useful.
+        /*
+        let parent = if let Some(parent) = node.parent.borrow().as_ref() {
+            let route = parent.route_to_rust();
+            quote! {
+                pub mod silly_super {
+                    pub use #route::*;
+                }
+            }
+        } else {
+            TokenStream::new()
+        };
+        */
+
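+        // For a node named `led_0` with ord 42, the module generated below will look
+        // roughly like (sketch; properties, children, and augments elided):
+        //
+        //     pub mod led_0 {
+        //         pub const ORD: usize = 42;
+        //         ...
+        //     }
+
+        // If this is compatible with an augment, use the augment to add any additional properties.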
+ let augs = augments.iter().map(|aug| aug.augment(node, self)); + + if let Some(name) = name { + let name_id = dt_to_lower_id(name); + quote! { + pub mod #name_id { + pub const ORD: usize = #ord; + #(#props)* + #(#children)* + // #parent + #(#augs)* + } + } + } else { + quote! { + #(#props)* + #(#children)* + #(#augs)* + } + } + } + + // This is the "fun" part. We try to find some patterns that can be formatted more nicely, but + // otherwise they are just somewhat simply converted. + fn property_walk(&self, prop: &Property) -> TokenStream { + // Pattern matching is rather messy at this point. + if let Some(value) = prop.get_single_value() { + match value { + Value::Words(ref words) => { + if words.len() == 1 { + match &words[0] { + Word::Number(n) => { + let tag = dt_to_upper_id(&prop.name); + return quote! { + pub const #tag: u32 = #n; + }; + } + _ => (), + } + } + } + Value::Phandle(ref ph) => { + let target = ph.node_ref(); + let route = target.route_to_rust(); + let tag = dt_to_lower_id(&prop.name); + return quote! { + pub mod #tag { + pub use #route::*; + } + } + } + _ => (), + } + } + general_property(prop) + } +} + +impl Node { + /// Return the route to this node, as a Rust token stream giving a fully resolved name of the + /// route. + pub fn route_to_rust(&self) -> TokenStream { + let route: Vec<_> = self.route.iter().map(|p| dt_to_lower_id(p)).collect(); + quote! { + crate :: devicetree #(:: #route)* + } + } + + /// Walk this tree of nodes, writing out the path names of the nodes that are present. The name + /// of None, indicates the root node. + fn output_path_walk(&self, write: &mut W, name: Option<&str>) -> Result<()> { + for child in &self.children { + let fixed_name = fix_id(&child.name); + let child_name = if let Some(name) = name { + format!("{}::{}", name, fixed_name) + } else { + fixed_name + }; + + writeln!(write, "cargo:rustc-cfg=dt=\"{}\"", child_name)?; + + for prop in &child.properties { + prop.output_path(write, &fix_id(&child_name))?; + } + + child.output_path_walk(write, Some(&child_name))?; + } + + Ok(()) + } +} + +impl Property { + // Return property values that consist of a single value. + fn get_single_value(&self) -> Option<&Value> { + if self.value.len() == 1 { + Some(&self.value[0]) + } else { + None + } + } + + // If this property is a single top-level phandle, output that a that path is valid. It isn't a + // real node, but acts like one. + fn output_path(&self, write: &mut W, name: &str) -> Result<()> { + if let Some(value) = self.get_single_value() { + if let Value::Phandle(_) = value { + writeln!(write, "cargo:rustc-cfg=dt=\"{}::{}\"", name, fix_id(&self.name))?; + } + } + Ok(()) + } +} + +fn general_property(prop: &Property) -> TokenStream { + let text = format!("{:?}", prop.value); + let tag = format!("{}_DEBUG", prop.name); + let tag = dt_to_upper_id(&tag); + quote! { + pub const #tag: &'static str = #text; + } +} + +/// Given a DT name, return an identifier for a lower-case version. +pub fn dt_to_lower_id(text: &str) -> Ident { + format_ident!("{}", fix_id(&text)) +} + +pub fn dt_to_upper_id(text: &str) -> Ident { + format_ident!("{}", fix_id(&text.to_uppercase())) +} + +/// Fix a devicetree identifier to be safe as a rust identifier. 
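+///
+/// For example, `gpio@50000000` maps to `gpio_50000000`, and `#size-cells` maps to
+/// `Nsize_cells`.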
+fn fix_id(text: &str) -> String { + let mut result = String::new(); + for ch in text.chars() { + match ch { + '#' => result.push('N'), + '-' => result.push('_'), + '@' => result.push('_'), + ',' => result.push('_'), + ch => result.push(ch), + } + } + result +} diff --git a/zephyr-build/src/devicetree/parse.rs b/zephyr-build/src/devicetree/parse.rs new file mode 100644 index 00000000..44a42899 --- /dev/null +++ b/zephyr-build/src/devicetree/parse.rs @@ -0,0 +1,269 @@ +//! DTS Parser +//! +//! Parse a limited subset of the devicetree source file that is output by the device tree compiler. +//! This is used to parse the `zephyr.dts` file generated as a part of a Zephyr build. + +use std::{cell::RefCell, collections::BTreeMap, rc::Rc}; + +use pest::{iterators::{Pair, Pairs}, Parser}; +use pest_derive::Parser; + +use crate::devicetree::Phandle; + +use super::{ordmap::OrdMap, DeviceTree, Node, Property, Value, Word}; + +#[derive(Parser)] +#[grammar = "devicetree/dts.pest"] +pub struct Dts; + +pub fn parse(text: &str, ords: &OrdMap) -> DeviceTree { + let pairs = Dts::parse(Rule::file, text) + .expect("Parsing zephyr.dts"); + + let b = TreeBuilder::new(ords); + b.walk(pairs) +} + +struct TreeBuilder<'a> { + ords: &'a OrdMap, + /// All labels. + labels: BTreeMap>, +} + +impl<'a> TreeBuilder<'a> { + fn new(ords: &'a OrdMap) -> TreeBuilder<'a> { + TreeBuilder { + ords, + labels: BTreeMap::new(), + } + } + + fn walk(mut self, pairs: Pairs<'_, Rule>) -> DeviceTree { + // There is a single node at the top. + let node = pairs.into_iter().next().unwrap(); + assert_eq!(node.as_rule(), Rule::node); + + DeviceTree { + root: self.walk_node(node, "", &[]), + labels: self.labels, + } + } + + // This is a single node in the DTS. The name should match one of the ordmap entries. + // The root node doesn't get a nodename. + fn walk_node(&mut self, node: Pair<'_, Rule>, path: &str, route: &[String]) -> Rc { + /* + let ord = self.ords.0.get(name) + .expect("Unexpected node path"); + println!("Root: {:?} {}", name, ord); + */ + + let mut name = LazyName::new(path, route.to_owned(), &self.ords); + let mut labels = Vec::new(); + let mut properties = Vec::new(); + let mut children = Vec::new(); + + for pair in node.into_inner() { + match pair.as_rule() { + Rule::nodename => { + let text = pair.as_str(); + name.set(text.to_string()); + } + Rule::label => { + labels.push(pair.as_str().to_string()); + } + Rule::property => { + properties.push(decode_property(pair)); + } + Rule::node => { + let child_path = name.path_ref(); + children.push(self.walk_node(pair, child_path, &name.route_ref())); + } + r => panic!("node: {:?}", r), + } + } + + // Make a clone of the labels, as we need them cloned anyway. + let labels2 = labels.clone(); + + // Build this node. + // println!("Node: {:?}", name.path_ref()); + let mut result = name.into_node(); + result.labels = labels; + result.properties = properties; + result.children = children; + let node = Rc::new(result); + + // Insert all of the labels. + for lab in labels2 { + self.labels.insert(lab, node.clone()); + } + node + } +} + +/// Decode a property node in the parse tree. +fn decode_property(node: Pair<'_, Rule>) -> Property { + let mut name = None; + let mut value = Vec::new(); + for pair in node.into_inner() { + match pair.as_rule() { + Rule::nodename => { + name = Some(pair.as_str().to_string()); + } + Rule::words => { + value.push(Value::Words(decode_words(pair))); + } + Rule::phandle => { + // TODO: Decode these. 
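+                // In the dtc output a bare phandle value appears as `&label`, e.g.
+                // (illustrative) `zephyr,console = &usart1;`, so the leading `&` is
+                // stripped below.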
+ // println!("phandle: {:?}", pair.as_str()); + value.push(Value::Phandle(Phandle::new(pair.as_str()[1..].to_string()))); + } + Rule::string => { + // No escapes at this point. + let text = pair.as_str(); + // Remove the quotes. + let text = &text[1..text.len()-1]; + value.push(Value::String(text.to_string())); + } + Rule::bytes => { + value.push(Value::Bytes(decode_bytes(pair))); + } + r => panic!("rule: {:?}", r), + } + } + Property { name: name.unwrap(), value } +} + +fn decode_words<'i>(node: Pair<'i, Rule>) -> Vec { + let mut value = Vec::new(); + for pair in node.into_inner() { + match pair.as_rule() { + Rule::hex_number => { + let text = pair.as_str(); + let num = u32::from_str_radix(&text[2..], 16).unwrap(); + value.push(Word::Number(num)); + } + Rule::decimal_number => { + let text = pair.as_str(); + let num = u32::from_str_radix(text, 10).unwrap(); + value.push(Word::Number(num)); + } + Rule::phandle => { + // println!("phandle: {:?}", pair.as_str()); + let text = pair.as_str(); + value.push(Word::Phandle(Phandle::new(text[1..].to_string()))); + } + _ => unreachable!(), + } + } + value +} + +fn decode_bytes<'i>(node: Pair<'i, Rule>) -> Vec { + let mut value = Vec::new(); + for pair in node.into_inner() { + match pair.as_rule() { + Rule::plain_hex_number => { + let text = pair.as_str(); + let num = u8::from_str_radix(text, 16).unwrap(); + value.push(num) + } + _ => unreachable!(), + } + } + value +} + +// Lazily track the path and node name. The parse tree has the nodename for a given node come after +// entering the node, but before child nodes are seen. +struct LazyName<'a, 'b> { + // The parent path leading up to this node. Will be the empty string for the root node. + path: &'a str, + route: Vec, + ords: &'b OrdMap, + // Our information, once we have it. + info: Option, +} + +struct Info { + name: String, + // Our path, the parent path combined with our name. + path: String, + ord: usize, +} + +impl<'a, 'b> LazyName<'a, 'b> { + fn new(path: &'a str, route: Vec, ords: &'b OrdMap) -> LazyName<'a, 'b> { + if path.is_empty() { + let ord = ords.0["/"]; + LazyName { + path, + route, + ords, + info: Some(Info { + name: "/".to_string(), + path: "/".to_string(), + ord, + }) + } + } else { + LazyName { + path, + route, + ords, + info: None, + } + } + } + + /// Indicate that we now know our name. + fn set(&mut self, name: String) { + if self.info.is_some() { + panic!("Grammar error, node has multiple names"); + } + + self.route.push(name.clone()); + + let mut path = self.path.to_string(); + if path.len() > 1 { + path.push('/'); + } + path.push_str(&name); + // println!("node: {:?}", path); + let ord = self.ords.0[&path]; + self.info = Some(Info { + name, + path, + ord, + }); + } + + fn path_ref(&self) -> &str { + &self.info.as_ref().unwrap().path + } + + fn route_ref(&self) -> &[String] { + &self.route + } + + fn ord(&self) -> usize { + self.info.as_ref().unwrap().ord + } + + // Turn this into a template for a node, with the properties, labels and children as empty. 
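+    // Panics if the parse never supplied a nodename, which the grammar only permits
+    // for the root node (whose info is pre-populated in `new`).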
+ fn into_node(self) -> Node { + let info = self.info.unwrap(); + + Node { + name: info.name, + path: info.path, + route: self.route, + ord: info.ord, + labels: Vec::new(), + properties: Vec::new(), + children: Vec::new(), + parent: RefCell::new(None), + } + } +} diff --git a/zephyr-build/src/lib.rs b/zephyr-build/src/lib.rs index bec71347..c4f02ef5 100644 --- a/zephyr-build/src/lib.rs +++ b/zephyr-build/src/lib.rs @@ -15,9 +15,15 @@ use std::io::{BufRead, BufReader, Write}; use std::env; use std::fs::File; use std::path::Path; +use std::process::{Command, Stdio}; +use proc_macro2::TokenStream; use regex::Regex; +use devicetree::{Augment, DeviceTree}; + +mod devicetree; + /// Export boolean Kconfig entries. This must happen in any crate that wishes to access the /// configuration settings. pub fn export_bool_kconfig() { @@ -73,3 +79,91 @@ pub fn build_kconfig_mod() { } } } + +/// Parse the finalized DTS file, generating the Rust devicetree file. +fn import_dt() -> DeviceTree { + let zephyr_dts = env::var("ZEPHYR_DTS").expect("ZEPHYR_DTS must be set"); + let gen_include = env::var("BINARY_DIR_INCLUDE_GENERATED") + .expect("BINARY_DIR_INCLUDE_GENERATED must be set"); + + let generated = format!("{}/devicetree_generated.h", gen_include); + DeviceTree::new(&zephyr_dts, generated) +} + +pub fn build_dts() { + let dt = import_dt(); + + let outdir = env::var("OUT_DIR").expect("OUT_DIR must be set"); + let out_path = Path::new(&outdir).join("devicetree.rs"); + let mut out = File::create(&out_path).expect("Unable to create devicetree.rs"); + + let augments = env::var("DT_AUGMENTS").expect("DT_AUGMENTS must be set"); + let augments: Vec = augments.split_whitespace().map(String::from).collect(); + + // Make sure that cargo knows to run if this changes, or any file mentioned changes. + println!("cargo:rerun-if-env-changed=DT_AUGMENTS"); + for name in &augments { + println!("cargo:rerun-if-changed={}", name); + } + + let mut augs = Vec::new(); + for aug in &augments { + // println!("Load augment: {:?}", aug); + let mut aug = devicetree::load_augments(aug).expect("Loading augment file"); + augs.append(&mut aug); + } + // For now, just print it out. + // println!("augments: {:#?}", augs); + let augs: Vec<_> = augs + .into_iter() + .map(|aug| Box::new(aug) as Box) + .collect(); + + let tokens = dt.to_tokens(&augs); + if has_rustfmt() { + write_formatted(out, tokens); + } else { + writeln!(out, "{}", tokens).unwrap(); + }; +} + +/// Generate cfg directives for each of the nodes in the generated device tree. +/// +/// This assumes that build_dts was already run by the `zephyr` crate, which should happen if this +/// is called from a user application. +pub fn dt_cfgs() { + let dt = import_dt(); + dt.output_node_paths(&mut std::io::stdout()).unwrap(); +} + +/// Determine if `rustfmt` is in the path, and can be excecuted. Returns false on any kind of error. +pub fn has_rustfmt() -> bool { + match Command::new("rustfmt") + .arg("--version") + .status() + { + Ok(st) if st.success() => true, + _ => false, + } +} + +/// Attempt to write the contents to a file, using rustfmt. If there is an error running rustfmt, +/// print a warning, and then just directly write the file. +fn write_formatted(file: File, tokens: TokenStream) { + let mut rustfmt = Command::new("rustfmt") + .args(["--emit", "stdout"]) + .stdin(Stdio::piped()) + .stdout(file) + .stderr(Stdio::inherit()) + .spawn() + .expect("Failed to run rustfmt"); + // TODO: Handle the above failing. 
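+    // (A reasonable fallback would be to write `tokens` unformatted, mirroring what
+    // `build_dts` does when rustfmt is absent entirely.)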
+ + let mut stdin = rustfmt.stdin.as_ref().expect("Stdin should have been opened by spawn"); + writeln!(stdin, "{}", tokens).expect("Writing to rustfmt"); + + match rustfmt.wait() { + Ok(st) if st.success() => (), + _ => panic!("Failure running rustfmt"), + } +} diff --git a/zephyr-sys/Cargo.toml b/zephyr-sys/Cargo.toml index 62e2256d..005695c7 100644 --- a/zephyr-sys/Cargo.toml +++ b/zephyr-sys/Cargo.toml @@ -14,5 +14,5 @@ Zephyr low-level API bindings. # used by the core Zephyr tree, but are needed by zephyr applications. [build-dependencies] anyhow = "1.0" -bindgen = { version = "0.69.4", features = ["experimental"] } +bindgen = { version = "0.70.1", features = ["experimental"] } # zephyr-build = { version = "0.1.0", path = "../zephyr-build" } diff --git a/zephyr-sys/build.rs b/zephyr-sys/build.rs index 5d43b42d..df5a9519 100644 --- a/zephyr-sys/build.rs +++ b/zephyr-sys/build.rs @@ -70,13 +70,32 @@ fn main() -> Result<()> { .derive_copy(false) .allowlist_function("k_.*") .allowlist_function("gpio_.*") + .allowlist_function("flash_.*") + .allowlist_function("usb_.*") + .allowlist_function("hid_.*") + .allowlist_item("GPIO_.*") + .allowlist_item("USB_.*") + .allowlist_item("FLASH_.*") + .allowlist_item("Z_.*") + .allowlist_item("ZR_.*") + .allowlist_item("K_.*") + .allowlist_item("hid_ops*") + .allowlist_item("uart_line_ctrl") + // Each DT node has a device entry that is a static. + .allowlist_item("__device_dts_ord.*") + .allowlist_function("device_.*") + .allowlist_function("led_.*") .allowlist_function("sys_.*") .allowlist_function("z_log.*") .allowlist_function("bt_.*") + .allowlist_function("SEGGER.*") + .allowlist_function("uart_.*") + .allowlist_function("thread_analyzer.*") .allowlist_item("E.*") .allowlist_item("K_.*") .allowlist_item("ZR_.*") .allowlist_item("LOG_LEVEL_.*") + .allowlist_item("k_poll_modes") // Deprecated .blocklist_function("sys_clock_timeout_end_calc") .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) @@ -93,7 +112,9 @@ fn main() -> Result<()> { fn define_args(bindings: Builder, prefix: &str, var_name: &str) -> Builder { let text = env::var(var_name).unwrap(); let mut bindings = bindings; - for entry in text.split(" ") { + // Split on either spaces or semicolons, to allow some flexibility in what cmake might generate + // for us. + for entry in text.split(&[' ', ';']) { if entry.is_empty() { continue; } diff --git a/zephyr-sys/src/lib.rs b/zephyr-sys/src/lib.rs index 5f317d8c..4cd040a6 100644 --- a/zephyr-sys/src/lib.rs +++ b/zephyr-sys/src/lib.rs @@ -46,5 +46,7 @@ macro_rules! derive_copy { } } -derive_copy!(z_spinlock_key); -derive_clone!(z_spinlock_key); +derive_copy!(z_spinlock_key, led_rgb); +derive_clone!(z_spinlock_key, led_rgb); +derive_copy!(k_timeout_t); +derive_clone!(k_timeout_t); diff --git a/zephyr-sys/wrapper.h b/zephyr-sys/wrapper.h index 8c3ca158..509e2b84 100644 --- a/zephyr-sys/wrapper.h +++ b/zephyr-sys/wrapper.h @@ -11,6 +11,12 @@ * are output. */ +/* + * This is getting built with KERNEL defined, which causes syscalls to not be implemented. Work + * around this by just undefining this symbol. + */ +#undef KERNEL + #ifdef RUST_BINDGEN /* errno is coming from somewhere in Zephyr's build. Add the symbol when running bindgen so that it * is defined here. @@ -35,13 +41,29 @@ extern int errno; #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_THREAD_ANALYZER +#include +#endif /* - * bindgen will output #defined constant that resolve to simple numbers. 
There are some symbols - * that we want exported that, at least in some situations, are more complex, usually with a type - * case. + * bindgen will only output #defined constants that resolve to simple numbers. These are some + * symbols that we want exported that, at least in some situations, are more complex, usually with a + * type cast. * * We'll use the prefix "ZR_" to avoid conflicts with other symbols. */ const uintptr_t ZR_STACK_ALIGN = Z_KERNEL_STACK_OBJ_ALIGN; const uintptr_t ZR_STACK_RESERVED = K_KERNEL_STACK_RESERVED; + +const uint32_t ZR_POLL_TYPE_SEM_AVAILABLE = K_POLL_TYPE_SEM_AVAILABLE; +const uint32_t ZR_POLL_TYPE_SIGNAL = K_POLL_TYPE_SIGNAL; +const uint32_t ZR_POLL_TYPE_DATA_AVAILABLE = K_POLL_TYPE_DATA_AVAILABLE; diff --git a/zephyr/Cargo.toml b/zephyr/Cargo.toml index 15d183c4..3a4b7f36 100644 --- a/zephyr/Cargo.toml +++ b/zephyr/Cargo.toml @@ -20,6 +20,7 @@ cfg-if = "1.0" # The log create is used if the user desires logging, and calls `set_logger()`. log = "0.4.22" +arrayvec = { version = "0.7.6", default-features = false } [dependencies.fugit] version = "0.3.7" @@ -44,6 +45,11 @@ version = "0.2.2" # should be safe to build the crate even if the Rust code doesn't use it because of configs. features = ["alloc"] +# Gives us an ArrayDeque type to implement a basic ring buffer. +[dependencies.arraydeque] +version = "0.5.1" +default-features = false + # These are needed at build time. # Whether these need to be vendored is an open question. They are not # used by the core Zephyr tree, but are needed by zephyr applications. diff --git a/zephyr/build.rs b/zephyr/build.rs index f4345e95..84f3a782 100644 --- a/zephyr/build.rs +++ b/zephyr/build.rs @@ -14,4 +14,5 @@ fn main() { zephyr_build::export_bool_kconfig(); zephyr_build::build_kconfig_mod(); + zephyr_build::build_dts(); } diff --git a/zephyr/src/device.rs b/zephyr/src/device.rs new file mode 100644 index 00000000..da7fb12a --- /dev/null +++ b/zephyr/src/device.rs @@ -0,0 +1,46 @@ +//! Device wrappers +//! +//! This module contains implementations of wrappers for various types of devices in zephyr. In +//! general, these wrap a `*const device` from Zephyr, and provide an API that is appropriate. +//! +//! Most of these instances come from the device tree. + +// Allow for a Zephyr build that has no devices at all. +#![allow(dead_code)] + +use crate::sync::atomic::{AtomicBool, Ordering}; + +pub mod gpio; +pub mod flash; +pub mod uart; +pub mod led; +pub mod led_strip; + +// Allow dead code, because it isn't required for a given build to have any devices. +/// Device uniqueness. +/// +/// As the zephyr devices are statically defined structures, this `Unique` value ensures that the +/// user is only able to get a single instance of any given device. +/// +/// Note that some devices in zephyr will require more than one instance of the actual device. For +/// example, a [`GpioPin`] will reference a single pin, but the underlying device for the gpio +/// driver will be shared among then. Generally, the constructor for the individual device will +/// call `get_instance_raw()` on the underlying device. +pub(crate) struct Unique(pub(crate) AtomicBool); + +impl Unique { + // Note that there are circumstances where these are in zero-initialized memory, so false must + // be used here, and the result of `once` inverted. + /// Construct a new unique counter. + pub(crate) const fn new() -> Unique { + Unique(AtomicBool::new(false)) + } + + /// Indicates if this particular entity can be used. 
This function, on a given `Unique` value + /// will return true exactly once. + pub(crate) fn once(&self) -> bool { + // `fetch_add` is likely to be faster than compare_exchage. This does have the limitation + // that `once` is not called more than `usize::MAX` times. + !self.0.fetch_or(true, Ordering::AcqRel) + } +} diff --git a/zephyr/src/device/flash.rs b/zephyr/src/device/flash.rs new file mode 100644 index 00000000..83e277c3 --- /dev/null +++ b/zephyr/src/device/flash.rs @@ -0,0 +1,59 @@ +//! Device wrappers for flash controllers, and flash partitions. + +// Note that currently, the flash partition shares the controller, so the underlying operations +// are not actually safe. Need to rethink how to manage this. + +use crate::raw; +use super::Unique; + +/// A flash controller +/// +/// This is a wrapper around the `struct device` in Zephyr that represents a flash controller. +/// Using the flash controller allows flash operations on the entire device. See +/// [`FlashPartition`] for a wrapper that limits the operation to a partition as defined in the +/// DT. +#[allow(dead_code)] +pub struct FlashController { + pub(crate) device: *const raw::device, +} + +impl FlashController { + /// Constructor, intended to be called by devicetree generated code. + #[allow(dead_code)] + pub(crate) unsafe fn new(unique: &Unique, device: *const raw::device) -> Option { + if !unique.once() { + return None; + } + + Some(FlashController { device }) + } +} + +/// A wrapper for flash partitions. There is no Zephyr struct that corresponds with this +/// information, which is typically used in a more direct underlying manner. +#[allow(dead_code)] +pub struct FlashPartition { + /// The underlying controller. + #[allow(dead_code)] + pub(crate) controller: FlashController, + #[allow(dead_code)] + pub(crate) offset: u32, + #[allow(dead_code)] + pub(crate) size: u32, +} + +impl FlashPartition { + /// Constructor, intended to be called by devicetree generated code. + #[allow(dead_code)] + pub(crate) unsafe fn new(unique: &Unique, device: *const raw::device, offset: u32, size: u32) -> Option { + if !unique.once() { + return None; + } + + // The `get_instance` on the flash controller would try to guarantee a unique instance, + // but in this case, we need one for each device, so just construct it here. + // TODO: This is not actually safe. + let controller = FlashController { device }; + Some(FlashPartition { controller, offset, size }) + } +} diff --git a/zephyr/src/device/gpio.rs b/zephyr/src/device/gpio.rs new file mode 100644 index 00000000..2c9b50c4 --- /dev/null +++ b/zephyr/src/device/gpio.rs @@ -0,0 +1,144 @@ +//! Most devices in Zephyr operate on a `struct device`. This provides untyped access to +//! devices. We want to have stronger typing in the Zephyr interfaces, so most of these types +//! will be wrapped in another structure. This wraps a Gpio device, and provides methods to +//! most of the operations on gpios. +//! +//! Safey: In general, even just using gpio pins is unsafe in Zephyr. The gpio drivers are used +//! pervasively throughout Zephyr device drivers. As such, most of the calls in this module are +//! unsafe. + +use core::ffi::c_int; + +use crate::raw; +use super::Unique; + +/// Global instance to help make gpio in Rust slightly safer. +/// +/// # Safety +/// +/// To help with safety, the rust types use a global instance of a gpio-token. 
Methods will +/// take a mutable reference to this, which will require either a single thread in the +/// application code, or something like a mutex or critical section to manage. The operation +/// methods are still unsafe, because we have no control over what happens with the gpio +/// operations outside of Rust code, but this will help make the Rust usage at least better. +pub struct GpioToken(()); + +static GPIO_TOKEN: Unique = Unique::new(); + +impl GpioToken { + /// Retrieves the gpio token. This is unsafe because lots of code in zephyr operates on the + /// gpio drivers. + pub unsafe fn get_instance() -> Option { + if !GPIO_TOKEN.once() { + return None; + } + Some(GpioToken(())) + } +} + +/// A single instance of a zephyr device to manage a gpio controller. A gpio controller +/// represents a set of gpio pins, that are generally operated on by the same hardware block. +pub struct Gpio { + /// The underlying device itself. + #[allow(dead_code)] + pub(crate) device: *const raw::device, +} + +// SAFETY: Gpio's can be shared with other threads. Safety is maintained by the Token. +unsafe impl Send for Gpio {} + +impl Gpio { + /// Constructor, used by the devicetree generated code. + /// + /// TODO: Guarantee single instancing. + #[allow(dead_code)] + pub(crate) unsafe fn new(unique: &Unique, device: *const raw::device) -> Option { + if !unique.once() { + return None; + } + Some(Gpio { device }) + } + + /// Verify that the device is ready for use. At a minimum, this means the device has been + /// successfully initialized. + pub fn is_ready(&self) -> bool { + unsafe { + raw::device_is_ready(self.device) + } + } +} + +/// A GpioPin represents a single pin on a gpio device. +/// +/// This is a lightweight wrapper around the Zephyr `gpio_dt_spec` structure. Note that +/// multiple pins may share a gpio controller, and as such, all methods on this are both unsafe, +/// and require a mutable reference to the [`GpioToken`]. +#[allow(dead_code)] +pub struct GpioPin { + pub(crate) pin: raw::gpio_dt_spec, +} + +// SAFETY: GpioPin's can be shared with other threads. Safety is maintained by the Token. +unsafe impl Send for GpioPin {} + +impl GpioPin { + /// Constructor, used by the devicetree generated code. + #[allow(dead_code)] + pub(crate) unsafe fn new(unique: &Unique, device: *const raw::device, pin: u32, dt_flags: u32) -> Option { + if !unique.once() { + return None; + } + Some(GpioPin { + pin: raw::gpio_dt_spec { + port: device, + pin: pin as raw::gpio_pin_t, + dt_flags: dt_flags as raw::gpio_dt_flags_t, + } + }) + } + + /// Verify that the device is ready for use. At a minimum, this means the device has been + /// successfully initialized. + pub fn is_ready(&self) -> bool { + self.get_gpio().is_ready() + } + + /// Get the underlying Gpio device. + pub fn get_gpio(&self) -> Gpio { + Gpio { + device: self.pin.port, + } + } + + /// Configure a single pin. + pub unsafe fn configure(&mut self, _token: &mut GpioToken, extra_flags: raw::gpio_flags_t) { + // TODO: Error? + unsafe { + raw::gpio_pin_configure(self.pin.port, + self.pin.pin, + self.pin.dt_flags as raw::gpio_flags_t | extra_flags); + } + } + + /// Toggle pin level. + pub unsafe fn toggle_pin(&mut self, _token: &mut GpioToken) { + // TODO: Error? + unsafe { + raw::gpio_pin_toggle_dt(&self.pin); + } + } + + /// Set the logical level of the pin. + pub unsafe fn set(&mut self, _token: &mut GpioToken, value: bool) { + raw::gpio_pin_set_dt(&self.pin, value as c_int); + } + + /// Read the logical level of the pin. 
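+    ///
+    /// Returns `true` for a logical high level. Any error from the underlying
+    /// `gpio_pin_get_dt` call currently results in a panic.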
+ pub unsafe fn get(&mut self, _token: &mut GpioToken) -> bool { + match raw::gpio_pin_get_dt(&self.pin) { + 0 => false, + 1 => true, + _ => panic!("TODO: Handle gpio get error"), + } + } +} diff --git a/zephyr/src/device/led.rs b/zephyr/src/device/led.rs new file mode 100644 index 00000000..f00fb6eb --- /dev/null +++ b/zephyr/src/device/led.rs @@ -0,0 +1,44 @@ +//! LED driver, with support for PWMLED driver, with support for PWM. + +use crate::raw; +use crate::error::{Result, to_result_void}; + +use super::Unique; + +/// A simple led strip wrapper. +pub struct Leds { + /// The underlying device itself. + #[allow(dead_code)] + pub(crate) device: *const raw::device, + /// How many are configured in the DT. + pub(crate) count: usize, +} + +// This is send, safe with Zephyr. +unsafe impl Send for Leds { } + +impl Leds { + /// Constructor, used by the devicetree generated code. + #[allow(dead_code)] + pub(crate) unsafe fn new(unique: &Unique, device: *const raw::device, count: usize) -> Option { + if !unique.once() { + return None; + } + + Some(Leds { device, count }) + } + + /// Return the number of LEDS. + pub fn len(&self) -> usize { + self.count + } + + /// Set the brightness of one of the described LEDs + pub unsafe fn set_brightness(&mut self, index: usize, value: u8) -> Result<()> { + to_result_void(unsafe { + raw::led_set_brightness(self.device, + index as u32, + value) + }) + } +} diff --git a/zephyr/src/device/led_strip.rs b/zephyr/src/device/led_strip.rs new file mode 100644 index 00000000..2f6ace23 --- /dev/null +++ b/zephyr/src/device/led_strip.rs @@ -0,0 +1,45 @@ +//! Simple led strip driver + +use crate::raw; +use crate::error::{Result, to_result_void}; + +use super::Unique; + +/// A simple led strip wrapper. +pub struct LedStrip { + /// The underlying device itself. + #[allow(dead_code)] + pub(crate) device: *const raw::device, +} + +// This is send, safe with Zephyr. +unsafe impl Send for LedStrip { } + +impl LedStrip { + /// Constructor, used by the devicetree generated code. + #[allow(dead_code)] + pub(crate) unsafe fn new(unique: &Unique, device: *const raw::device) -> Option { + if !unique.once() { + return None; + } + + Some(LedStrip { device }) + } + + /// Return the number of LEDS in the chain. + pub fn chain_len(&self) -> usize { + unsafe { raw::led_strip_length(self.device) as usize} + } + + /// Update the state of the LEDs. + /// + /// It is unclear from the API docs whether this is supposed to be an array of rgb_led + /// (which is what the samples assume), or packed rgb data. + pub unsafe fn update(&mut self, channels: &[raw::led_rgb]) -> Result<()> { + to_result_void(unsafe { + raw::led_strip_update_rgb(self.device, + channels.as_ptr() as *mut _, + channels.len()) + }) + } +} diff --git a/zephyr/src/device/uart.rs b/zephyr/src/device/uart.rs new file mode 100644 index 00000000..d98b0b09 --- /dev/null +++ b/zephyr/src/device/uart.rs @@ -0,0 +1,166 @@ +//! Simple (and unsafe) wrappers around USB devices. + +// TODO! Remove this. +#![allow(dead_code)] +#![allow(unused_variables)] + +use crate::raw; +use crate::error::{Error, Result, to_result_void, to_result}; +use crate::printkln; + +use core::ffi::{c_int, c_uchar, c_void}; +use core::ptr; + +use super::Unique; + +#[cfg(CONFIG_RUST_ALLOC)] +mod irq; +#[cfg(CONFIG_RUST_ALLOC)] +pub use irq::UartIrq; + +/// A wrapper around a UART device on Zephyr. +pub struct Uart { + /// The underlying device itself. + #[allow(dead_code)] + pub(crate) device: *const raw::device, +} + +/// Uart control values. 
+/// +/// This mirrors these definitions from C, but as an enum. +#[repr(u32)] +pub enum LineControl { + /// Baud rate + BaudRate = raw::uart_line_ctrl_UART_LINE_CTRL_BAUD_RATE, + /// Request To Send (RTS) + RTS = raw::uart_line_ctrl_UART_LINE_CTRL_RTS, + /// Data Terminal Ready (DTR) + DTR = raw::uart_line_ctrl_UART_LINE_CTRL_DTR, + /// Data Carrier Detect (DCD) + DCD = raw::uart_line_ctrl_UART_LINE_CTRL_DCD, + /// Data Set Ready (DSR) + DSR = raw::uart_line_ctrl_UART_LINE_CTRL_DSR, +} + +impl Uart { + // Note that the `poll_in` and `poll_out` are terrible. + + /// Constructor, used by the devicetree generated code. + #[allow(dead_code)] + pub(crate) unsafe fn new(unique: &Unique, device: *const raw::device) -> Option { + if !unique.once() { + return None; + } + + Some(Uart { device }) + } + + /// Attempt to read a character from the UART fifo. + /// + /// Will return Ok(Some(ch)) if there is a character available, `Ok(None)` if no character + /// is available, or `Err(e)` if there was an error. + pub unsafe fn poll_in(&mut self) -> Result> { + let mut ch: c_uchar = 0; + + match to_result_void(unsafe { raw::uart_poll_in(self.device, &mut ch) }) { + Ok(()) => Ok(Some(ch as u8)), + Err(Error(1)) => Ok(None), + Err(e) => Err(e), + } + } + + /// Attempt to write to the outgoing FIFO. + /// + /// This writes to the outgoing UART fifo. This will block if the outgoing fifo is full. + pub unsafe fn poll_out(&mut self, out_char: u8) { + unsafe { raw::uart_poll_out(self.device, out_char as c_uchar) } + } + + /// Fill FIFO with data. + /// + /// This is unspecified what happens if this is not called from IRQ context. + /// Returns Ok(n) for the number of bytes sent. + pub unsafe fn fifo_fill(&mut self, data: &[u8]) -> Result { + to_result(unsafe { + raw::uart_fifo_fill(self.device, data.as_ptr(), data.len() as c_int) + }).map(|count| count as usize) + } + + /// Drain FIFO. + /// + /// This is unspecified as to what happens if not called from IRQ context. + pub unsafe fn fifo_read(&mut self, data: &mut [u8]) -> Result { + to_result(unsafe { + raw::uart_fifo_read(self.device, data.as_mut_ptr(), data.len() as c_int) + }).map(|count| count as usize) + } + + /// Read one of the UART line control values. + pub unsafe fn line_ctrl_get(&self, item: LineControl) -> Result { + let mut result: u32 = 0; + to_result_void(unsafe { + raw::uart_line_ctrl_get(self.device, item as u32, &mut result) + }).map(|()| result) + } + + /// Set one of the UART line control values. + pub unsafe fn line_ctrl_set(&mut self, item: LineControl, value: u32) -> Result<()> { + to_result_void(unsafe { + raw::uart_line_ctrl_set(self.device, item as u32, value) + }) + } + + /// Convenience, return if DTR is asserted. + pub unsafe fn is_dtr_set(&self) -> Result { + let ret = unsafe { + self.line_ctrl_get(LineControl::DTR)? + }; + Ok(ret == 1) + } + + /// Convert this UART into an async one. + pub unsafe fn into_async(self) -> Result { + UartAsync::new(self) + } + + /// Convert into an IRQ one. The parameters `WS` and `RS` set the size of the rings for write + /// and read respectively. + #[cfg(CONFIG_RUST_ALLOC)] + pub unsafe fn into_irq(self) -> Result> { + UartIrq::new(self) + } +} + +/// The uart is safe to Send, as long as it is only used from one thread at a time. As such, it is +/// not Sync. +unsafe impl Send for Uart {} + +/// This is the async interface to the uart. +/// +/// Until we can analyze this for safety, it will just be declared as unsafe. 
+/// +/// It is unclear from the docs what context this callback api is called from, so we will assume +/// that it might be called from an irq. As such, we'll need to use a critical-section and it's +/// mutex to protect the data. +pub struct UartAsync(); + +impl UartAsync { + /// Take a Uart device and turn it into an async interface. + /// + /// TODO: Return the uart back if this fails. + pub unsafe fn new(uart: Uart) -> Result { + let ret = unsafe { + raw::uart_callback_set(uart.device, Some(async_callback), ptr::null_mut()) + }; + to_result_void(ret)?; + Ok(UartAsync()) + } +} + +extern "C" fn async_callback( + _dev: *const raw::device, + _evt: *mut raw::uart_event, + _user_data: *mut c_void, +) { + printkln!("Async"); +} diff --git a/zephyr/src/device/uart/irq.rs b/zephyr/src/device/uart/irq.rs new file mode 100644 index 00000000..32af6f6e --- /dev/null +++ b/zephyr/src/device/uart/irq.rs @@ -0,0 +1,394 @@ +//! Simple (and unsafe) wrappers around USB devices. + +// TODO! Remove this. +#![allow(dead_code)] +#![allow(unused_variables)] + +extern crate alloc; + +// TODO: This should be a generic Buffer type indicating some type of owned buffer, with Vec as one +// possible implementation. +use alloc::vec::Vec; + +use arraydeque::ArrayDeque; + +use crate::raw; +use crate::error::{Result, to_result_void}; +use crate::sys::sync::Semaphore; +use crate::sync::{Arc, SpinMutex}; +use crate::time::Timeout; + +use core::ffi::c_void; +use core::ops::Range; +use core::{fmt, result}; + +use super::Uart; + +/// The "outer" struct holds the semaphore, and the mutex. The semaphore has to live outside of the +/// mutex because it can only be waited on when the Mutex is not locked. +struct IrqOuterData { + read_sem: Semaphore, + /// Write semaphore. This should **exactly** match the number of elements in `write_dones`. + write_sem: Semaphore, + inner: SpinMutex>, +} + +/// Data for communication with the UART IRQ. +struct IrqInnerData { + /// Write request. The 'head' is the one being worked on. Once completed, they will move into + /// the completion queue. + write_requests: ArrayDeque, + /// Completed writes. + write_dones: ArrayDeque, + /// Read requests. The 'head' is the one data will come into. + read_requests: ArrayDeque, + /// Completed writes. Generally, these will be full, but a read might move an early one here. + read_dones: ArrayDeque, +} + +/// A single requested write. This is a managed buffer, and a range of the buffer to actually +/// write. The write is completed when `pos` == `len`. +struct WriteRequest { + /// The data to write. + data: Vec, + /// What part to write. + part: Range, +} + +/// A completed write. All the requested data will have been written, and this returns the buffer +/// to the user. +struct WriteDone { + /// The returned buffer. + data: Vec, +} + +/// A single read request. This is a buffer to hold data being read, along with the part still +/// valid to hold data. +struct ReadRequest { + /// The data to read. + data: Vec, + /// How much of the data has been read so far. + len: usize, +} + +impl ReadRequest { + fn into_done(self) -> ReadDone { + ReadDone { data: self.data, len: self.len } + } +} + +/// A completed read. +struct ReadDone { + /// The buffer holding the data. + data: Vec, + /// How much of `data` contains read data. Should always be > 0. + len: usize, +} + +impl ReadDone { + fn into_result(self) -> ReadResult { + ReadResult { data: self.data, len: self.len } + } +} + +/// The result of a read. 
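+///
+/// Use [`as_slice`](ReadResult::as_slice) to view the valid portion of the buffer, or
+/// [`into_inner`](ReadResult::into_inner) to recover the buffer for reuse.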
+pub struct ReadResult { + data: Vec, + len: usize, +} + +impl ReadResult { + pub fn as_slice(&self) -> &[u8] { + &self.data[..self.len] + } + + pub fn into_inner(self) -> Vec { + self.data + } +} + +/// The error type from write requests. Used to return the buffer. +pub struct WriteError(pub Vec); + +// The default Debug for Write error will print the whole buffer, which isn't particularly useful. +impl fmt::Debug for WriteError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "WriteError(...)") + } +} + +/// The error type from read requests. Used to return the buffer. +pub struct ReadError(pub Vec); + +// The default Debug for Write error will print the whole buffer, which isn't particularly useful. +impl fmt::Debug for ReadError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ReadError(...)") + } +} + +/// The wait for write completion timed out. +pub struct WriteWaitTimedOut; + +/// The wait for read completion timed out. +pub struct ReadWaitTimedOut; + +/// An interface to the UART, that uses the "legacy" IRQ API. +/// +/// The interface is parameterized by two value, `WS` is the number of elements in the write ring, +/// and `RS` is the number of elements in the read ring. Each direction will have two rings, one +/// for pending operations, and the other for completed operations. Setting these to 2 is +/// sufficient to avoid stalling writes or dropping reads, but requires the application attend to +/// the buffers. +pub struct UartIrq { + /// Interior wrapped device, to be able to hand out lifetime managed references to it. + uart: Uart, + /// Critical section protected data. + data: Arc>, +} + +// UartIrq is also Send, !Sync, for the same reasons as for Uart. +unsafe impl Send for UartIrq {} + +impl UartIrq { + /// Convert uart into irq driven one. + pub unsafe fn new(uart: Uart) -> Result { + let data = Arc::new(IrqOuterData { + read_sem: Semaphore::new(0, RS as u32)?, + write_sem: Semaphore::new(0, WS as u32)?, + inner: SpinMutex::new(IrqInnerData { + write_requests: ArrayDeque::new(), + write_dones: ArrayDeque::new(), + read_requests: ArrayDeque::new(), + read_dones: ArrayDeque::new(), + }), + }); + + // Clone the arc, and convert to a raw pointer, to give to the callback. + // This will leak the Arc (which prevents deallocation). + let data_raw = Arc::into_raw(data.clone()); + let data_raw = data_raw as *mut c_void; + + let ret = unsafe { + raw::uart_irq_callback_user_data_set(uart.device, Some(irq_callback::), data_raw) + }; + to_result_void(ret)?; + // Should this be settable? + unsafe { + // raw::uart_irq_tx_enable(uart.device); + raw::uart_irq_rx_enable(uart.device); + } + Ok(UartIrq { + data, + uart, + }) + } + + /// Get the underlying UART to be able to change line control and such. + /// + /// TODO: This really should return something like `&Uart` to bind the lifetime. Otherwise the + /// user can continue to use the uart handle beyond the lifetime of the driver. + pub unsafe fn inner(&mut self) -> &Uart { + &self.uart + } + + /// Enqueue a single write request. + /// + /// If the queue is full, the `WriteError` returned will return the buffer. + pub fn write_enqueue(&mut self, data: Vec, part: Range) -> result::Result<(), WriteError> { + let mut inner = self.data.inner.lock().unwrap(); + + let req = WriteRequest { data, part }; + match inner.write_requests.push_back(req) { + Ok(()) => { + // Make sure the write actually happens. 
This needs to happen for the first message + // queued, if some were already queued, it should already be enabled. + if inner.write_requests.len() == 1 { + unsafe { raw::uart_irq_tx_enable(self.uart.device); } + } + Ok(()) + } + Err(e) => Err(WriteError(e.element.data)), + } + } + + /// Return true if the write queue is full. + /// + /// There is a race between this and write_enqueue, but only in the safe direction (this may + /// return false, but a write may complete before being able to call write_enqueue). + pub fn write_is_full(&self) -> bool { + let inner = self.data.inner.lock().unwrap(); + + inner.write_requests.is_full() + } + + /// Retrieve a write completion. + /// + /// Waits up to `timeout` for a write to complete, and returns the buffer. + pub fn write_wait(&mut self, timeout: T) -> result::Result, WriteWaitTimedOut> + where T: Into, + { + match self.data.write_sem.take(timeout) { + Ok(()) => (), + // TODO: Handle other errors? + Err(_) => return Err(WriteWaitTimedOut), + } + + let mut inner = self.data.inner.lock().unwrap(); + Ok(inner.write_dones.pop_front().expect("Write done empty, despite semaphore").data) + } + + /// Enqueue a buffer for reading data. + /// + /// Enqueues a buffer to hold read data. Can enqueue up to RS of these. + pub fn read_enqueue(&mut self, data: Vec) -> result::Result<(), ReadError> { + let mut inner = self.data.inner.lock().unwrap(); + + let req = ReadRequest { data, len: 0 }; + match inner.read_requests.push_back(req) { + Ok(()) => { + // Enable the rx fifo so incoming data will be placed. + if inner.read_requests.len() == 1 { + unsafe { raw::uart_irq_rx_enable(self.uart.device); } + } + Ok(()) + } + Err(e) => Err(ReadError(e.element.data)) + } + } + + /// Wait up to 'timeout' for a read to complete, and returns the data. + /// + /// Note that if there is a buffer that has been partially filled, this will return that buffer, + /// so that there isn't a delay with read data. + pub fn read_wait(&mut self, timeout: T) -> result::Result + where T: Into, + { + // If there is no read data available, see if we have a partial block we can consider a + // completion. + let mut inner = self.data.inner.lock().unwrap(); + if inner.read_dones.is_empty() { + if let Some(req) = inner.read_requests.pop_front() { + // TODO: User defined threshold? + if req.len > 0 { + // Queue this up as a completion. + inner.read_dones.push_back(req.into_done()).unwrap(); + + // Signal the sem, as we've pushed. + self.data.read_sem.give(); + } else { + // Stick it back on the queue. + inner.read_requests.push_front(req).unwrap(); + } + } + } + drop(inner); + + match self.data.read_sem.take(timeout) { + Ok(()) => (), + // TODO: Handle other errors? + Err(_) => return Err(ReadWaitTimedOut), + } + + let mut inner = self.data.inner.lock().unwrap(); + let done = inner.read_dones.pop_front().expect("Semaphore mismatched with read done queue"); + Ok(done.into_result()) + } +} + +// TODO: It could actually be possible to implement drop, but we would need to make sure the irq +// handlers are deregistered. These is also the issue of the buffers being dropped. For now, just +// panic, as this isn't normal. +impl Drop for UartIrq { + fn drop(&mut self) { + panic!("UartIrq dropped"); + } +} + +extern "C" fn irq_callback( + dev: *const raw::device, + user_data: *mut c_void, +) { + // Convert our user data, back to the CS Mutex. + let outer = unsafe { &*(user_data as *const IrqOuterData) }; + let mut inner = outer.inner.lock().unwrap(); + + // Handle any read requests. 
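+    // Drain the rx fifo into the head read request, turning requests into completions
+    // as their buffers fill. When no request remains, rx interrupts are disabled until
+    // `read_enqueue` supplies another buffer.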
+ loop { + if let Some(mut req) = inner.read_requests.pop_front() { + if req.len == req.data.len() { + // This buffer is full, make it a completion. + inner.read_dones.push_back(req.into_done()) + .expect("Completion queue not large enough"); + outer.read_sem.give(); + } else { + // Read as much as we can. + let piece = &mut req.data[req.len..]; + let count = unsafe { + raw::uart_fifo_read(dev, piece.as_mut_ptr(), piece.len() as i32) + }; + if count < 0 { + panic!("Incorrect use of read"); + } + let count = count as usize; + + // Adjust the piece. The next time through the loop will notice if the write is + // full. + req.len += count; + inner.read_requests.push_front(req) + .expect("Unexpected read request overflow"); + + if count == 0 { + // There is no more data in the fifo. + break; + } + } + } else { + // No place to store results. Turn off the irq and stop. + // The doc's don't describe this as being possible, but hopefully the implementations + // are sane. + unsafe { raw::uart_irq_rx_disable(dev); } + break; + } + } + + // Handle any write requests. + loop { + if let Some(mut req) = inner.write_requests.pop_front() { + if req.part.is_empty() { + // This request is empty. Move to completion. + inner.write_dones.push_back(WriteDone { data: req.data }) + .expect("write done queue is full"); + outer.write_sem.give(); + } else { + // Try to write this part of the data. + let piece = &req.data[req.part.clone()]; + let count = unsafe { + raw::uart_fifo_fill(dev, piece.as_ptr(), piece.len() as i32) + }; + if count < 0 { + panic!("Incorrect use of device fifo"); + } + let count = count as usize; + + // Adjust the part. The next through the loop will notice the write being done. + req.part.start += count; + inner.write_requests.push_front(req) + .expect("Unexpected write_dones overflow"); + + // If the count reaches 0, the fifo is full. + if count == 0 { + break; + } + } + } else { + // No work. Turn off the irq, and stop. + unsafe { raw::uart_irq_tx_disable(dev); } + break; + } + } + + unsafe { + raw::uart_irq_update(dev); + } +} diff --git a/zephyr/src/kio.rs b/zephyr/src/kio.rs new file mode 100644 index 00000000..89e8936a --- /dev/null +++ b/zephyr/src/kio.rs @@ -0,0 +1,178 @@ +//! Async IO for Zephyr +//! +//! This implements the basics of using Zephyr's work queues to implement async code on Zephyr. +//! +//! Most of the work happens in [`work`] and in [`futures`] +//! +//! [`work`]: crate::work +//! [`futures`]: crate::work::futures + +use core::ffi::CStr; +use core::task::{Context, Poll}; +use core::{future::Future, pin::Pin}; + +use crate::sys::queue::Queue; +use crate::sys::sync::Semaphore; +use crate::time::{NoWait, Timeout}; +use crate::work::futures::WakeInfo; +use crate::work::Signal; +use crate::work::{futures::JoinHandle, futures::WorkBuilder, WorkQueue}; + +pub mod sync; + +pub use crate::work::futures::sleep; + +/// Run an async future on the given worker thread. +/// +/// Arrange to have the given future run on the given worker thread. The resulting `JoinHandle` has +/// `join` and `join_async` methods that can be used to wait for the given thread. +pub fn spawn(future: F, worker: &WorkQueue, name: &'static CStr) -> JoinHandle +where + F: Future + Send + 'static, + F::Output: Send + 'static, +{ + WorkBuilder::new() + .set_worker(worker) + .set_name(name) + .start(future) +} + +/// Run an async future on the current worker thread. +/// +/// Arrange to have the given future run on the current worker thread. 
The resulting `JoinHandle` +/// has `join` and `join_async` methods that can be used to wait for the given thread. +/// +/// The main use for this is to allow work threads to use `Rc` and `Rc>` within async +/// tasks. The main constraint is that references inside cannot be held across an `.await`. +/// +/// # Panics +/// If this is called other than from a worker task running on a work thread, it will panic. +pub fn spawn_local(future: F, name: &'static CStr) -> JoinHandle +where + F: Future + 'static, + F::Output: Send + 'static, +{ + WorkBuilder::new() + .set_name(name) + .start_local(future) +} + +/// Yield the current thread, returning it to the work queue to be run after other work on that +/// queue. (This has to be called `yield_now` in Rust, because `yield` is a keyword. +pub fn yield_now() -> impl Future { + YieldNow { waited: false } +} + +struct YieldNow { + waited: bool, +} + +impl Future for YieldNow { + type Output = (); + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut core::task::Context<'_>, + ) -> core::task::Poll { + if self.waited { + Poll::Ready(()) + } else { + // Enqueue outselves with no wait and no events. + let info = unsafe { WakeInfo::from_context(cx) }; + + // Unsafely check if the work queue running us is empty. We only check explicitly + // specified workers (TODO access the system work queue). The check is racy, but should + // always fail indicating that the queue is not empty when it could be. Checking this + // avoids re-scheduling the only worker back into the queue. + // SAFETY: The check is racy, but will fail with us yielding when we didn't need to. + if let Some(wq) = info.queue { + let wq = unsafe { wq.as_ref() }; + if wq.pending.head == wq.pending.tail { + return Poll::Ready(()); + } + } + + info.timeout = NoWait.into(); + self.waited = true; + + Poll::Pending + } + } +} + +/// Extensions on [`Context`] to support scheduling via Zephyr's workqueue system. +/// +/// All of these are called from within the context of running work, and indicate what _next_ +/// should cause this work to be run again. If none of these methods are called before the work +/// exits, the work will be scheduled to run after `Forever`, which is not useful. There may be +/// later support for having a `Waker` that can schedule work from another context. +/// +/// Note that the events to wait on, such as Semaphores or channels, if there are multiple threads +/// that can wait for them, might cause this worker to run, but not actually be available. As such, +/// to maintain the non-blocking requirements of Work, [`Semaphore::take`], and the blocking `send` +/// and `recv` operations on channels should not be used, even after being woken. +/// +/// For the timeout [`Forever`] is useful to indicate there is no timeout. If called with +/// [`NoWait`], the work will be immediately scheduled. In general, it is better to query the +/// underlying object directly rather than have the overhead of being rescheduled. +/// +/// # Safety +/// +/// The lifetime bounds on the items waited for ensure that these items live at least as long as the +/// work queue. Practically, this can only be satisfied by using something with 'static' lifetime, +/// or embedding the value in the Future itself. +/// +/// With the Zephyr executor, the `Context` is embedded within a `WakeInfo` struct, which this makes +/// use of. If a different executor were to be used, these calls would result in undefined +/// behavior. +/// +/// This could be checked at runtime, but it would have runtime cost. 
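+///
+/// A `poll` implementation will typically call one of these before returning
+/// `Poll::Pending`. A minimal sketch (illustrative; `ready` and `sem` are assumed
+/// fields of the future):
+///
+/// ```ignore
+/// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+///     if self.ready() {
+///         Poll::Ready(())
+///     } else {
+///         // Reschedule this work once `sem` can be taken.
+///         cx.add_semaphore(&self.sem, Forever);
+///         Poll::Pending
+///     }
+/// }
+/// ```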
+/// +/// [`Forever`]: crate::time::Forever +pub trait ContextExt { + /// Indicate the work should next be scheduled based on a semaphore being available for "take". + /// + /// The work will be scheduled either when the given semaphore becomes available to 'take', or + /// after the timeout. + fn add_semaphore<'a>(&'a mut self, sem: &'a Semaphore, timeout: impl Into); + + /// Indicate that the work should be scheduled after receiving the given [`Signal`], or the + /// timeout occurs. + fn add_signal<'a>(&'a mut self, signal: &'a Signal, timeout: impl Into); + + /// Indicate that the work should be scheduled when the given [`Queue`] has data available to + /// recv, or the timeout occurs. + fn add_queue<'a>(&'a mut self, queue: &'a Queue, timeout: impl Into); + + /// Indicate that the work should just be scheduled after the given timeout. + /// + /// Note that this only works if none of the other wake methods are called, as those also set + /// the timeout. + fn add_timeout(&mut self, timeout: impl Into); +} + +/// Implementation of ContextExt for the Rust [`Context`] type. +impl<'b> ContextExt for Context<'b> { + fn add_semaphore<'a>(&'a mut self, sem: &'a Semaphore, timeout: impl Into) { + let info = unsafe { WakeInfo::from_context(self) }; + info.add_semaphore(sem); + info.timeout = timeout.into(); + } + + fn add_signal<'a>(&'a mut self, signal: &'a Signal, timeout: impl Into) { + let info = unsafe { WakeInfo::from_context(self) }; + info.add_signal(signal); + info.timeout = timeout.into(); + } + + fn add_queue<'a>(&'a mut self, queue: &'a Queue, timeout: impl Into) { + let info = unsafe { WakeInfo::from_context(self) }; + info.add_queue(queue); + info.timeout = timeout.into(); + } + + fn add_timeout(&mut self, timeout: impl Into) { + let info = unsafe { WakeInfo::from_context(self) }; + info.timeout = timeout.into(); + } +} diff --git a/zephyr/src/kio/sync.rs b/zephyr/src/kio/sync.rs new file mode 100644 index 00000000..bb7cc848 --- /dev/null +++ b/zephyr/src/kio/sync.rs @@ -0,0 +1,126 @@ +//! Synchronization mechanisms that work with async. +//! +//! Notably, Zephyr's `k_mutex` type isn't supported as a type that can be waited for +//! asynchronously. +//! +//! The main problem with `k_mutex` (meaning [`crate::sync::Mutex`]) is that the `lock` operation +//! can block, and since multiple tasks may be scheduled for the same work queue, the system can +//! deadlock, as the scheduler may not run to allow the task that actually holds the mutex to run. +//! +//! As an initial stopgap. We provide a [`Mutex`] type that is usable within an async context. We +//! do not currently implement an associated `Condvar`. +//! +//! Note that using Semaphores for locking means that this mechanism doesn't handle priority +//! inversion issues. Be careful with workers that run at different priorities. + +// Use the same error types from the regular sync version. + +use core::{ + cell::UnsafeCell, + fmt, + marker::PhantomData, + ops::{Deref, DerefMut}, +}; + +use crate::{ + sync::{LockResult, TryLockError, TryLockResult}, + sys::sync::Semaphore, + time::{Forever, NoWait}, +}; + +/// A mutual exclusion primitive useful for protecting shared data. Async version. +/// +/// This mutex will block a task waiting for the lock to become available. +pub struct Mutex { + /// The semaphore indicating ownership of the data. When it is "0" the task that did the 'take' + /// on it owns the data, and will use `give` when it is unlocked. 
This mechanism works for + /// simple Mutex that protects the data without needing a condition variable. + inner: Semaphore, + data: UnsafeCell, +} + +// SAFETY: The semaphore, with the semantics provided here, provide Send and Sync. +unsafe impl Send for Mutex {} +unsafe impl Sync for Mutex {} + +impl fmt::Debug for Mutex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Mutex {:?}", self.inner) + } +} + +/// An RAII implementation of a held lock. +pub struct MutexGuard<'a, T: ?Sized + 'a> { + lock: &'a Mutex, + // Mark !Send explicitly until support is added to Rust for this. + _nosend: PhantomData>, +} + +// unsafe impl Sync for MutexGuard<'_, T> {} +unsafe impl Sync for MutexGuard<'_, T> {} + +impl Mutex { + /// Construct a new Mutex. + pub fn new(t: T) -> Mutex { + Mutex { + inner: Semaphore::new(1, 1).unwrap(), + data: UnsafeCell::new(t), + } + } +} + +impl Mutex { + /// Acquire the mutex, blocking the current thread until it is able to do so. + /// + /// This is a sync version, and calling it from an async task will possibly block the async work + /// thread, potentially causing deadlock. + pub fn lock(&self) -> LockResult> { + self.inner.take(Forever).unwrap(); + unsafe { Ok(MutexGuard::new(self)) } + } + + /// Aquire the mutex, async version. + pub async fn lock_async(&self) -> LockResult> { + self.inner.take_async(Forever).await.unwrap(); + unsafe { Ok(MutexGuard::new(self)) } + } + + /// Attempt to aquire the lock. + pub fn try_lock(&self) -> TryLockResult> { + match self.inner.take(NoWait) { + Ok(()) => unsafe { Ok(MutexGuard::new(self)) }, + // TODO: Distinguish timeout from other errors. + Err(_) => Err(TryLockError::WouldBlock), + } + } +} + +impl<'mutex, T: ?Sized> MutexGuard<'mutex, T> { + unsafe fn new(lock: &'mutex Mutex) -> MutexGuard<'mutex, T> { + MutexGuard { + lock, + _nosend: PhantomData, + } + } +} + +impl Deref for MutexGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + unsafe { &*self.lock.data.get() } + } +} + +impl DerefMut for MutexGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + unsafe { &mut *self.lock.data.get() } + } +} + +impl Drop for MutexGuard<'_, T> { + #[inline] + fn drop(&mut self) { + self.lock.inner.give(); + } +} diff --git a/zephyr/src/lib.rs b/zephyr/src/lib.rs index c92ceb4c..05c60fd0 100644 --- a/zephyr/src/lib.rs +++ b/zephyr/src/lib.rs @@ -11,14 +11,21 @@ #![deny(missing_docs)] pub mod align; +pub mod device; pub mod error; pub mod logging; pub mod object; +#[cfg(CONFIG_RUST_ALLOC)] +pub mod simpletls; pub mod sync; pub mod sys; pub mod time; #[cfg(CONFIG_RUST_ALLOC)] pub mod timer; +#[cfg(CONFIG_RUST_ALLOC)] +pub mod work; +#[cfg(CONFIG_RUST_ALLOC)] +pub mod kio; pub use error::{Error, Result}; @@ -43,6 +50,22 @@ pub mod kconfig { include!(concat!(env!("OUT_DIR"), "/kconfig.rs")); } +pub mod devicetree { + //! Zephyr device tree + //! + //! This is an auto-generated module that represents the device tree for a given build. The + //! hierarchy here should match the device tree, with an additional top-level module "labels" + //! that contains submodules for all of the labels. + //! + //! **Note**: Unless you are viewing docs generated for a specific build, the values below are + //! unlikely to directly correspond to those in a given build. + + // Don't enforce doc comments on the generated device tree. + #![allow(missing_docs)] + + include!(concat!(env!("OUT_DIR"), "/devicetree.rs")); +} + // Ensure that Rust is enabled. 
#[cfg(not(CONFIG_RUST))] compile_error!("CONFIG_RUST must be set to build Rust in Zephyr"); @@ -91,3 +114,11 @@ pub mod _export { // If allocation has been requested, provide the allocator. #[cfg(CONFIG_RUST_ALLOC)] pub mod alloc_impl; + +#[cfg(CONFIG_RUST_ALLOC)] +pub mod task { + //! Provides the portable-atomic version of `alloc::task::Wake`, which uses the compatible + //! versionm of Arc. + + pub use portable_atomic_util::task::Wake; +} diff --git a/zephyr/src/simpletls.rs b/zephyr/src/simpletls.rs new file mode 100644 index 00000000..ec892edf --- /dev/null +++ b/zephyr/src/simpletls.rs @@ -0,0 +1,123 @@ +//! A simple TLS helping tool. +//! +//! Until this crate implements general TLS support, similar to std, this simpletls module can +//! provide a simplified type of thread-local storage. + +extern crate alloc; + +use core::{ptr, sync::atomic::Ordering}; + +use alloc::boxed::Box; +use alloc::vec::Vec; +use zephyr_sys::{k_current_get, k_thread}; + +use crate::sync::{atomic::AtomicPtr, Mutex}; + +/// A container for simple thread local storage. +/// +/// This will maintain a mapping between Zephyr threads and a value of type T. Entries will have to +/// be added manually, generally when each thread is started. +/// +/// Note that T must implement Copy, as it is not safe to retain references to the inner data +/// outside of this api. +/// +/// T must also implement Send, since although 'get' always retrieves the current thread's data, +/// `insert` will typically need to move `T` across threads. +pub struct SimpleTls { + map: Vec<(usize, T)>, +} + +impl SimpleTls { + /// Create a new SimpleTls. + pub const fn new() -> Self { + Self { map: Vec::new() } + } + + /// Insert a new association into the SimpleTls. + /// + /// If this thread has already been added, the value will be replaced. + pub fn insert(&mut self, thread: *const k_thread, data: T) { + let thread = thread as usize; + + match self.map.binary_search_by(|(id, _)| id.cmp(&thread)) { + Ok(pos) => self.map[pos] = (thread, data), // Replace existing. + Err(pos) => self.map.insert(pos, (thread, data)), + } + } + + /// Lookup the data associated with a given thread. + pub fn get(&self) -> Option { + let thread = unsafe { k_current_get() } as usize; + + self.map + .binary_search_by(|(id, _)| id.cmp(&thread)) + .ok() + .map(|pos| self.map[pos].1) + } +} + +/// A helper to safely use these with static. +/// +/// The StaticTls type has a constant constructor, and the same insert and get methods as the +/// underlying SimpleTls, with support for initializing the Mutex as needed. +// TODO: This should eventually make it into a more general lazy mechanism. +pub struct StaticTls { + /// The container for the data. + /// + /// The AtomicPtr is either Null, or contains a raw pointer to the underlying Mutex holding the + /// data. + data: AtomicPtr>>, +} + +impl StaticTls { + /// Create a new StaticTls that is empty. + pub const fn new() -> Self { + Self { + data: AtomicPtr::new(ptr::null_mut()), + } + } + + /// Get the underlying Mutex out of the data, initializing it with an empty type if necessary. + fn get_inner(&self) -> &Mutex> { + let data = self.data.fetch_update( + // TODO: These orderings are likely stronger than necessary. + Ordering::SeqCst, + Ordering::SeqCst, + |ptr| { + if ptr.is_null() { + // For null, we need to allocate a new one. + let data = Box::new(Mutex::new(SimpleTls::new())); + Some(Box::into_raw(data)) + } else { + // If there was already a value, just use it. 
+ None + } + }); + let data = match data { + Ok(_) => { + // If the update stored something, it unhelpfully returns the old value, which was + // the null pointer. Since the pointer will only ever be updated once, it is safe + // to use a relaxed load here. + self.data.load(Ordering::Relaxed) + } + // If there was already a pointer, that is what we want. + Err(ptr) => ptr, + }; + + // SAFETY: The stored data was updated at most once, by the above code, and we now have a + // pointer to a valid leaked box holding the data. + unsafe { &*data } + } + + /// Insert a new association into the StaticTls. + pub fn insert(&self, thread: *const k_thread, data: T) { + let inner = self.get_inner(); + inner.lock().unwrap().insert(thread, data); + } + + /// Lookup the data associated with a given thread. + pub fn get(&self) -> Option { + let inner = self.get_inner(); + inner.lock().unwrap().get() + } +} diff --git a/zephyr/src/sync.rs b/zephyr/src/sync.rs index 977433bb..e76179f7 100644 --- a/zephyr/src/sync.rs +++ b/zephyr/src/sync.rs @@ -27,6 +27,8 @@ pub mod atomic { #[cfg(CONFIG_RUST_ALLOC)] pub use portable_atomic_util::Arc; +#[cfg(CONFIG_RUST_ALLOC)] +pub use portable_atomic_util::Weak; mod mutex; @@ -36,6 +38,7 @@ pub use mutex::{ Condvar, LockResult, TryLockResult, + TryLockError, }; mod spinmutex; diff --git a/zephyr/src/sync/channel.rs b/zephyr/src/sync/channel.rs index 7dba4bb5..67cfc652 100644 --- a/zephyr/src/sync/channel.rs +++ b/zephyr/src/sync/channel.rs @@ -45,12 +45,15 @@ use alloc::boxed::Box; use core::cell::UnsafeCell; use core::ffi::c_void; use core::fmt; +use core::future::Future; use core::marker::PhantomData; use core::mem::MaybeUninit; use core::pin::Pin; +use core::task::Poll; +use crate::kio::ContextExt; use crate::sys::queue::Queue; -use crate::time::{Forever, NoWait, Timeout}; +use crate::time::{Duration, Forever, NoWait, Timeout}; mod counter; @@ -205,6 +208,85 @@ impl Sender { } } +// A little note about the Unpin constraint here. Because Futures are pinned in Rust Async code, +// and the future stores the messages, we can only send and receive messages that aren't pinned. +impl Sender { + /// Waits for a message to be sent into the channel, but only for a limited time. Async + /// version. + /// + /// This has the same behavior as [`send_timeout`], but as an Async function. + /// + /// [`send_timeout`]: Sender::send_timeout + pub fn send_timeout_async<'a>(&'a self, msg: T, timeout: impl Into) + -> impl Future>> + 'a + { + SendFuture { + sender: self, + msg: Some(msg), + timeout: timeout.into(), + waited: false, + } + } + + /// Sends a message over the given channel, waiting if necessary. Async version. + pub async fn send_async(&self, msg: T) -> Result<(), SendError> { + self.send_timeout_async(msg, Forever).await + } + + // Note that there is no async version of `try_send`. +} + +/// The implementation of Future for Sender::send_timeout_async. +struct SendFuture<'a, T: Unpin> { + sender: &'a Sender, + msg: Option, + timeout: Timeout, + waited: bool, +} + +impl<'a, T: Unpin> Future for SendFuture<'a, T> { + type Output = Result<(), SendError>; + + fn poll(self: Pin<&mut Self>, cx: &mut core::task::Context<'_>) -> core::task::Poll { + /* + let this = unsafe { + Pin::get_unchecked_mut(self) + }; + */ + let this = Pin::get_mut(self); + + // Take the message out in preparation to try sending it. It is a logic error if the unwrap + // fails. + let msg = this.msg.take().unwrap(); + + // Try sending the message, with no timeout. 
+ let msg = match this.sender.try_send(msg) { + Ok(()) => return Poll::Ready(Ok(())), + Err(SendError(msg)) => msg, + }; + + if this.waited { + // We already waited, and no message, so give the messagre back, indiciating a timeout. + return Poll::Ready(Err(SendError(msg))); + } + + // Send didn't happen, put the message back to have for the next call. + this.msg = Some(msg); + + // Otherwise, schedule to wake up on receipt or timeout. + match &this.sender.flavor { + SenderFlavor::Unbounded { .. } => { + panic!("Implementation error: unbounded queues should never fail"); + } + SenderFlavor::Bounded(chan) => { + cx.add_queue(&chan.free, this.timeout); + } + } + + Poll::Pending + } +} + impl Drop for Sender { fn drop(&mut self) { match &self.flavor { @@ -341,6 +423,42 @@ impl Receiver { } } +// Note that receive doesn't need the Unpin constraint, as we aren't storing any message. +impl Receiver { + /// Waits for a message to be received from the channel, but only for a limited time. + /// Async version. + /// + /// If the channel is empty and not disconnected, this call will block until the receive + /// operation can proceed or the operation times out. + /// wake up and return an error. + pub fn recv_timeout_async<'a>(&'a self, timeout: impl Into) + -> impl Future> + 'a + { + RecvFuture { + receiver: self, + timeout: timeout.into(), + waited: false, + } + } + + /// Blocks the current thread until a message is received or the channel is empty and + /// disconnected. Async version. + /// + /// If the channel is empty and not disconnected, this call will block until the receive + /// operation can proceed. + pub async fn recv_async(&self) -> Result { + self.recv_timeout_async(Forever).await + } + + /// Return a reference to the inner queue. + fn as_queue(&self) -> &Queue { + match &self.flavor { + ReceiverFlavor::Unbounded { queue, .. } => queue, + ReceiverFlavor::Bounded(chan) => &chan.chan, + } + } +} + impl Drop for Receiver { fn drop(&mut self) { match &self.flavor { @@ -390,6 +508,34 @@ impl fmt::Debug for Receiver { } } +struct RecvFuture<'a, T> { + receiver: &'a Receiver, + timeout: Timeout, + waited: bool, +} + +impl<'a, T> Future for RecvFuture<'a, T> { + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut core::task::Context<'_>) -> Poll { + // Try to receive a message. + if let Ok(msg) = self.receiver.try_recv() { + return Poll::Ready(Ok(msg)); + } + + if self.waited { + // Wait already happened, so this is a timeout. + return Poll::Ready(Err(RecvError)); + } + + // Otherwise, schedule to wakeup on receipt or timeout. + cx.add_queue(self.receiver.as_queue(), self.timeout); + self.waited = true; + + Poll::Pending + } +} + /// The "flavor" of a receiver. This maps to the type of the channel. enum ReceiverFlavor { /// An unbounded queue. Messages were allocated with Box, and will be freed upon receipt. @@ -481,3 +627,92 @@ impl fmt::Debug for SendError { /// [`recv`]: Receiver::recv #[derive(PartialEq, Eq, Clone, Copy, Debug)] pub struct RecvError; + +/// Wait loop +/// +/// A common scenario for async work tasks is to wait for, and process messages off of a queue, but +/// to also wake periodically to perform some task. +/// +/// This performs this periodic loop. It has some support for handling the case where the +/// processing takes longer than the loop duration, but it merely re-schedules for the period past +/// the current time. This means the phase of the period will change upon dropped ticks. 
+/// +/// Each time an event is received, 'handle' is called with `Some(ev)`. In addition, periodically +/// (based on `period`) `handle` will be called with None. +/// +/// **Note**: It needs to be a single handler, because this closure will frequently be in a move +/// closure, and this would force shared data to be shared in Sync types of wrappers. The main +/// purpose of combining the event handling and the periodic is to avoid that. +/// +/// Note that also, if the timer is just barely able to run, it will still be scheduled "shortly" in +/// the future. +/// +/// T is the type of the messages expected to be received. +/// +/// TODO: This function, in general, is completely worthless without Rust support for [async +/// closures](https://rust-lang.github.io/rfcs/3668-async-closures.html). +pub async fn event_loop_useless ( + events: Receiver, + period: Duration, + mut handle: EF, +) -> ! +where + EF: FnMut(Option) -> EFF, + EFF: Future, +{ + // Start with a deadline 'period' out in the future. + let mut next = crate::time::now() + period; + loop { + if let Ok(ev) = events.recv_timeout_async(next).await { + handle(Some(ev)).await; + continue; + } + + // We either reached, or exceeded our timeout. + handle(None).await; + + // Calculate the next time. + next += period; + + // If this is passed, just reschedule after our Duration from "now". + let now = crate::time::now(); + if next <= now { + next = now + period; + } + } +} + +/// Wait loop, as a macro. +/// +/// This is the `event loop` above, implemented as a macro, which becomes more useful as the async +/// closures aren't needed. +#[macro_export] +macro_rules! event_loop { + ($events:expr, $period:expr, + Some($eventvar:ident) => $event_body:block, + None => $periodic_body: block $(,)?) => + { + let events = $events; + let period = $period; + let mut next = $crate::time::now() + period; + loop { + if let Ok($eventvar) = events.recv_timeout_async(next).await { + $event_body + } else { + // Note that ':block' above requires the braces, so this body can't introduce + // bindings that shadow our local variables. + $periodic_body + next += period; + + // If this is passed, just reschedule after our Duration from "now". + let now = $crate::time::now(); + if next <= now { + ::log::warn!("periodic overflow: {} ticks, {}:{}", + (now - next).ticks(), + core::file!(), core::line!()); + next = now + period; + } + } + } + }; +} diff --git a/zephyr/src/sys.rs b/zephyr/src/sys.rs index f85069a1..2c6a854f 100644 --- a/zephyr/src/sys.rs +++ b/zephyr/src/sys.rs @@ -41,6 +41,13 @@ pub fn uptime_get() -> i64 { } } +/// Busy wait. +/// +/// Busy wait for a give number of microseconds. This directly calls `zephyr_sys::k_busy_wait`. +/// +/// Zephyr has numerous caveats on configurations where this function doesn't work. +pub use zephyr_sys::k_busy_wait as busy_wait; + pub mod critical { //! Zephyr implementation of critical sections. //! diff --git a/zephyr/src/sys/queue.rs b/zephyr/src/sys/queue.rs index 943733f0..2360088d 100644 --- a/zephyr/src/sys/queue.rs +++ b/zephyr/src/sys/queue.rs @@ -23,7 +23,7 @@ use crate::time::Timeout; /// A wrapper around a Zephyr `k_queue` object. pub struct Queue { - item: Fixed, + pub(crate) item: Fixed, } unsafe impl Sync for StaticKernelObject { } diff --git a/zephyr/src/sys/sync/semaphore.rs b/zephyr/src/sys/sync/semaphore.rs index 1aafd8a6..85e7bdb4 100644 --- a/zephyr/src/sys/sync/semaphore.rs +++ b/zephyr/src/sys/sync/semaphore.rs @@ -11,16 +11,28 @@ //! 
operation, which in situation where counting is actually desired, will result in the count being //! incorrect. +#[cfg(CONFIG_RUST_ALLOC)] +use core::pin::Pin; +#[cfg(CONFIG_RUST_ALLOC)] +use core::task::{Context, Poll}; +#[cfg(CONFIG_RUST_ALLOC)] +use core::future::Future; use core::ffi::c_uint; use core::fmt; #[cfg(CONFIG_RUST_ALLOC)] use core::mem; +#[cfg(CONFIG_RUST_ALLOC)] +use zephyr_sys::ETIMEDOUT; + +use crate::kio::ContextExt; +#[cfg(CONFIG_RUST_ALLOC)] +use crate::time::NoWait; use crate::{ error::{to_result_void, Result}, object::{Fixed, StaticKernelObject, Wrapped}, raw::{ - k_sem, k_sem_count_get, k_sem_give, k_sem_init, k_sem_reset, k_sem_take + k_sem, k_sem_count_get, k_sem_give, k_sem_init, k_sem_reset, k_sem_take, }, time::Timeout, }; @@ -30,7 +42,7 @@ pub use crate::raw::K_SEM_MAX_LIMIT; /// A zephyr `k_sem` usable from safe Rust code. pub struct Semaphore { /// The raw Zephyr `k_sem`. - item: Fixed, + pub(crate) item: Fixed, } /// By nature, Semaphores are both Sync and Send. Safety is handled by the underlying Zephyr @@ -68,6 +80,18 @@ impl Semaphore { to_result_void(ret) } + /// Take a semaphore, async version. + /// + /// Returns a future that either waits for the semaphore, or returns status. + #[cfg(CONFIG_RUST_ALLOC)] + pub fn take_async<'a>(&'a self, timeout: impl Into) -> impl Future> + 'a { + SemTake { + sem: self, + timeout: timeout.into(), + ran: false, + } + } + /// Give a semaphore. /// /// This routine gives to the semaphore, unless the semaphore is already at its maximum @@ -100,6 +124,41 @@ impl Semaphore { } } +/// The async 'take' Future +#[cfg(CONFIG_RUST_ALLOC)] +struct SemTake<'a> { + /// The semaphore we're waiting on. + sem: &'a Semaphore, + /// The timeout to use. + timeout: Timeout, + /// Set after we've waited once. + ran: bool, +} + +#[cfg(CONFIG_RUST_ALLOC)] +impl<'a> Future for SemTake<'a> { + type Output = Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + // Always check if data is available. + if let Ok(()) = self.sem.take(NoWait) { + return Poll::Ready(Ok(())); + } + + if self.ran { + // If we ran once, and still don't have any data, indicate this as a timeout. + return Poll::Ready(Err(crate::Error(ETIMEDOUT))); + + } + + // TODO: Clean this up. + cx.add_semaphore(self.sem, self.timeout); + self.ran = true; + + Poll::Pending + } +} + /// A static Zephyr `k_sem`. /// /// This is intended to be used from within the `kobj_define!` macro. It declares a static ksem diff --git a/zephyr/src/time.rs b/zephyr/src/time.rs index e1d6eafb..7fe1c0b5 100644 --- a/zephyr/src/time.rs +++ b/zephyr/src/time.rs @@ -24,7 +24,7 @@ //! by non-constant values). Similarly, the `fugit` crate offers constructors that aim to result //! in constants when possible, avoiding costly division operations. -use zephyr_sys::{k_timeout_t, k_ticks_t}; +use zephyr_sys::{k_ticks_t, k_timeout_t, k_uptime_ticks}; use core::fmt::Debug; @@ -54,6 +54,12 @@ pub type Duration = fugit::Duration; #[cfg(CONFIG_TIMEOUT_64BIT)] pub type Instant = fugit::Instant; +/// Retrieve the current scheduler time as an Instant. This can be used to schedule timeouts at +/// absolute points in time. +pub fn now() -> Instant { + Instant::from_ticks(unsafe { k_uptime_ticks() as u64 }) +} + // The Zephyr `k_timeout_t` represents several different types of intervals, based on the range of // the value. It is a signed number of the same size as the Tick here, which effectively means it // is one bit less. 
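Since an `Instant` converts into a `Timeout` (via the wrapper below), `now()` makes it straightforward to wait against absolute deadlines and so avoid drift in periodic loops.  A minimal sketch of that pattern, assuming `CONFIG_TIMEOUT_64BIT` (so that `Instant` is defined) and hypothetical `events` (a channel `Receiver`), `handle`, and `tick` definitions:

```rust,ignore
use zephyr::time::{now, Duration};

async fn command_loop(events: Receiver<Command>) {
    // Absolute deadline for the next periodic tick, 100 ms from now.
    let mut next = now() + Duration::millis(100);
    loop {
        match events.recv_timeout_async(next).await {
            // A message arrived before the deadline.
            Ok(cmd) => handle(cmd).await,
            // The deadline passed: run the periodic work and move the deadline
            // forward by a whole period, keeping the phase stable.
            Err(_) => {
                tick();
                next += Duration::millis(100);
            }
        }
    }
}
```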
@@ -71,8 +77,17 @@ pub type Instant = fugit::Instant; /// This wrapper allows us to implement `From` and `Info` from the Fugit types into the Zephyr /// types. This allows various methods to accept either value, as well as the `Forever` and /// `NoWait` values defined here. +#[derive(Clone, Copy, Debug)] pub struct Timeout(pub k_timeout_t); +impl PartialEq for Timeout { + fn eq(&self, other: &Self) -> bool { + self.0.ticks == other.0.ticks + } +} + +impl Eq for Timeout {} + // `From` allows methods to take a time of various types and convert it into a Zephyr timeout. impl From for Timeout { fn from(value: Duration) -> Timeout { diff --git a/zephyr/src/work.rs b/zephyr/src/work.rs new file mode 100644 index 00000000..6632a8ce --- /dev/null +++ b/zephyr/src/work.rs @@ -0,0 +1,622 @@ +//! Zephyr Work Queues +//! +//! # Zephyr Work Queues and Work +//! +//! Zephyr has a mechanism called a +//! [Workqueue](https://docs.zephyrproject.org/latest/kernel/services/threads/workqueue.html). +//! +//! Each workqueue is backed by a single Zephyr thread, and has its own stack. The work queue +//! consists of a FIFO queue of work items that will be run consecutively on that thread. The +//! underlying types are `k_work_q` for the work queue itself, and `k_work` for the worker. +//! +//! In addition to the simple schedulable work, Zephyr also has two additional types of work: +//! `k_work_delayable` which can be scheduled to run in the future, and `k_work_poll`, described as +//! triggered work in the docs. This can be scheduled to run when various items within Zephyr +//! become available. This triggered work also has a timeout. In this sense the triggered work is +//! a superset of the other types of work. Both delayable and triggered work are implemented by +//! having the `k_work` embedded in their structure, and Zephyr schedules the work when the given +//! reason happens. +//! +//! Zephyr's work queues can be used in different ways: +//! +//! - Work can be scheduled as needed. For example, an IRQ handler can queue a work item to process +//! data it has received from a device. +//! - Work can be scheduled periodically. +//! +//! As most C use of Zephyr statically allocates things like work, these are typically rescheduled +//! when the work is complete. The work queue scheduling functions are designed, and intended, for +//! a given work item to be able to reschedule itself, and such usage is common. +//! +//! ## Waitable events +//! +//! The triggerable work items can be triggered to wake on a set of any of the following: +//! +//! - A signal. `k_poll_signal` is a type used just for waking work items. This works similar to a +//! binary semaphore, but is lighter weight for use just by this mechanism. +//! - A semaphore. Work can be scheduled to run when a `k_sem` is available. Since +//! [`sys::sync::Semaphore`] is built on top of `k_sem`, the "take" operation for these semaphores +//! can be a trigger source. +//! - A queue/FIFO/LIFO. The queue is used to implement [`sync::channel`] and thus any blocking +//! operation on queues can be a trigger source. +//! - Message Queues, and Pipes. Although not yet provided in Rust, these can also be a source of +//! triggering. +//! +//! It is important to note that the trigger source may not necessarily still be available by the +//! time the work item is actually run. This depends on the design of the system. If there is only +//! a single waiter, then it will still be available (the mechanism does not have false triggers, +//! like CondVar). +//! +//! 
Also note, specifically, that Zephyr Mutexes cannot be used as a trigger source.  That means
//! that locking a [`sync::Mutex`] shouldn't be used within work items.  There is another
//! [`kio::sync::Mutex`], which is a simplified Mutex that is implemented with a Semaphore, and
//! that can be used from work-queue based code.
//!
//! # Rust `Future`
//!
//! The Rust language also has built-in support for something rather similar to Zephyr work queues.
//! The main user-visible type behind this is [`Future`].  The Rust compiler supports declaring
//! functions, as well as code blocks, as `async`.  Instead of directly returning the given data,
//! such code returns a `Future` that has that data as its output type.  What this does is
//! essentially capture what would be stored on the stack to maintain the state of that code into
//! the data of the `Future` itself.  For Rust code running on a typical OS, a crate such as
//! [Tokio](https://tokio.rs/) provides what is known as an executor, which implements the
//! scheduling for these `Future`s, as well as equivalent primitives for Mutexes, Semaphores and
//! channels for this code to use for synchronization.
//!
//! Notably, Rust's `Future` machinery operates under a fairly simple assumption about how this
//! scheduling will work.  Each future is invoked with a Context, which contains a dynamic `Waker`
//! that can be invoked to schedule this Future to run again.  This means that the primitives are
//! typically implemented above OS primitives, where each manages wake queues to determine the work
//! that needs to be woken.
//!
//! # Bringing it together
//!
//! There are a couple of issues that need to be addressed to bring work-queue support to Rust.
//! First is the question of how they will be used.  On the one hand, there are users that will
//! definitely want to make use of `async` in Rust, and it is important to implement an executor,
//! similar to Tokio, that will schedule this `async` code.  On the other hand, it will likely be
//! common for others to want to make more direct use of the work queues themselves.  As such,
//! these users will want more direct access to scheduling and triggering of work.
//!
//! ## Future erasure
//!
//! One challenge with using `Future` for work is that the `Future` type intentionally erases the
//! details of scheduling work, reducing it down to a single `Waker`, which, similar to a trait
//! object, has a `wake` method to cause the executor to schedule this work.  Unfortunately, this
//! simple mechanism makes it challenging to take advantage of Zephyr's existing mechanisms for
//! automatically triggering work based on primitives.
//!
//! As such, what we do is have a structure `Work` that contains both a `k_work_poll` and a Rust
//! `Context`.  Our handler can use a mechanism similar to C's `CONTAINER_OF` macro to recover this
//! outer structure.
//!
//! There is some extra complexity to this process, as the `Future` we are storing associated with
//! the work is `?Sized`, since each particular Future will have a different size.  As such, it is
//! not possible to recover the full work type.  To work around this, we have a Sized struct at the
//! beginning of this structure, which, along with judicious use of `#[repr(C)]`, allows us to
//! recover this fixed data.  This structure contains the information needed to re-schedule the
//! work, based on what is needed.
//!
//! ## Ownership
//!
//! The remaining challenge with implementing `k_work` for Rust is that of ownership.  The model
//! taken here is that the work items are held in a `Box` that is effectively owned by the work
//! itself.  When the work item is scheduled to Zephyr, ownership of that box is effectively handed
//! off to C, and then when the work item is called, the Box is re-constructed.  This repeats until
//! the work is no longer needed (e.g. when a [`Future::poll`] returns `Ready`), at which point the
//! work will be dropped.
//!
//! There are two common ways the lifecycle of work can be managed in an embedded system:
//!
//! - A set of `Future`'s are allocated once at the start, and these never return a value.  Futures
//!   nested inside of these (which correspond to `.await` in async code) can have their own
//!   lifetimes and return values, but the main loops will not return values, or be dropped.
//!   Embedded Futures will typically not be boxed.
//! - Work will be dynamically created based on system need, with threads using [`kio::spawn`] to
//!   create additional work (or creating the `Work` items directly).  These can use [`join`] or
//!   [`join_async`] to wait for the results.
//!
//! One consequence of the ownership being passed through to C code is that if the work cancellation
//! mechanism is used on a work queue, the work items themselves will be leaked.
//!
//! The Future mechanism in Rust relies on the use of [`Pin`] to ensure that work items are not
//! moved.  We have the same requirements here, although currently, the pin is only applied while
//! the future is run, and we do not expose the `Box` that we use, thus preventing moves of the work
//! items.
//!
//! ## The work queues themselves
//!
//! Work queues themselves are built using [`WorkQueueBuilder`].  This needs a statically defined
//! stack.  Typical usage will be along the lines of:
//! ```rust
//! kobj_define! {
//!     WORKER_STACK: ThreadStack<2048>;
//! }
//! // ...
//! let main_worker = Box::new(
//!     WorkQueueBuilder::new()
//!         .set_priority(2)
//!         .set_name(c"mainloop")
//!         .set_no_yield(true)
//!         .start(WORKER_STACK.init_once(()).unwrap())
//! );
//!
//! let _ = zephyr::kio::spawn(
//!     mainloop(), // Async or function returning Future.
//!     &main_worker,
//!     c"w:mainloop",
//! );
//!
//! // ...
//!
//! // Leak the Box so that the worker is never freed.
//! let _ = Box::leak(main_worker);
//! ```
//!
//! It is important that WorkQueues never be dropped.  The type has a Drop implementation that
//! panics: Zephyr provides no mechanism to stop work queue threads, so dropping would result in
//! undefined behavior.
//!
//! # Current Status
//!
//! Although Zephyr has 3 types of work queues, the `k_work_poll` is sufficient to implement all of
//! the behavior, and this implementation only implements this type.  Non-Future work could be built
//! around the other work types.
//!
//! As such, this means that manually constructed work is still built using `Future`.  The `_async`
//! primitives throughout this crate can be used just as readily by hand-written Futures as by async
//! code.  Notably, the use of [`Signal`] will likely be common, along with possible timeouts.
//!
//! [`sys::sync::Semaphore`]: crate::sys::sync::Semaphore
//! [`sync::channel`]: crate::sync::channel
//! [`sync::Mutex`]: crate::sync::Mutex
//! [`kio::sync::Mutex`]: crate::kio::sync::Mutex
//! [`kio::spawn`]: crate::kio::spawn
//! [`join`]: futures::JoinHandle::join
//! 
[`join_async`]: futures::JoinHandle::join_async + +extern crate alloc; + +use alloc::boxed::Box; +use core::{ + convert::Infallible, + ffi::{c_int, c_uint, CStr}, + future::Future, + mem, + pin::Pin, + ptr, + task::Poll, +}; + +use zephyr_sys::{ + k_poll_signal, k_poll_signal_check, k_poll_signal_init, k_poll_signal_raise, + k_poll_signal_reset, k_work, k_work_init, k_work_q, k_work_queue_config, k_work_queue_init, + k_work_queue_start, k_work_submit, k_work_submit_to_queue, ETIMEDOUT, +}; + +use crate::{error::to_result_void, kio::ContextExt, object::Fixed, simpletls::StaticTls, sys::thread::ThreadStack, time::Timeout}; + +pub mod futures; + +/// A builder for work queues themselves. +/// +/// A work queue is a Zephyr thread that instead of directly running a piece of code, manages a work +/// queue. Various types of `Work` can be submitted to these queues, along with various types of +/// triggering conditions. +pub struct WorkQueueBuilder { + /// The "config" value passed in. + config: k_work_queue_config, + /// Priority for the work queue thread. + priority: c_int, +} + +impl WorkQueueBuilder { + /// Construct a new WorkQueueBuilder with default values. + pub fn new() -> Self { + Self { + config: k_work_queue_config { + name: ptr::null(), + no_yield: false, + essential: false, + }, + priority: 0, + } + } + + /// Set the name for the WorkQueue thread. + /// + /// This name shows up in debuggers and some analysis tools. + pub fn set_name(&mut self, name: &'static CStr) -> &mut Self { + self.config.name = name.as_ptr(); + self + } + + /// Set the "no yield" flag for the created worker. + /// + /// If this is not set, the work queue will call `k_yield` between each enqueued work item. For + /// non-preemptible threads, this will allow other threads to run. For preemptible threads, + /// this will allow other threads at the same priority to run. + /// + /// This method has a negative in the name, which goes against typical conventions. This is + /// done to match the field in the Zephyr config. + pub fn set_no_yield(&mut self, value: bool) -> &mut Self { + self.config.no_yield = value; + self + } + + /// Set the "essential" flag for the created worker. + /// + /// This sets the essential flag on the running thread. The system considers the termination of + /// an essential thread to be a fatal error. + pub fn set_essential(&mut self, value: bool) -> &mut Self { + self.config.essential = value; + self + } + + /// Set the priority for the worker thread. + /// + /// See the Zephyr docs for the meaning of priority. + pub fn set_priority(&mut self, value: c_int) -> &mut Self { + self.priority = value; + self + } + + /// Start the given work queue thread. + /// + /// TODO: Implement a 'start' that works from a static work queue. + pub fn start(&self, stack: ThreadStack) -> WorkQueue { + let item: Fixed = Fixed::new(unsafe { mem::zeroed() }); + unsafe { + // SAFETY: Initialize zeroed memory. + k_work_queue_init(item.get()); + + // SAFETY: This associates the workqueue with the thread ID that runs it. The thread is + // a pointer into this work item, which will not move, because of the Fixed. + let this = &mut *item.get(); + WORK_QUEUES.insert(&this.thread, WorkQueueRef(item.get())); + + // SAFETY: Start work queue thread. The main issue here is that the work queue cannot + // be deallocated once the thread has started. We enforce this by making Drop panic. 
            k_work_queue_start(
                item.get(),
                stack.base,
                stack.size,
                self.priority,
                &self.config,
            );
        }

        WorkQueue { item }
    }
}

/// A running work queue thread.
///
/// # Panic
///
/// Allowing a work queue to drop will result in a panic.  There are two ways to handle this,
/// depending on whether the WorkQueue is in a Box, or an Arc:
/// ```
/// // Leak a work queue in an Arc.
/// let wq = Arc::new(WorkQueueBuilder::new().start(...));
/// // If the Arc is used after this:
/// let _ = Arc::into_raw(wq.clone());
/// // If the Arc is no longer needed:
/// let _ = Arc::into_raw(wq);
///
/// // Leak a work queue in a Box.
/// let wq = Box::new(WorkQueueBuilder::new().start(...));
/// let _ = Box::leak(wq);
/// ```
pub struct WorkQueue {
    #[allow(dead_code)]
    item: Fixed<k_work_q>,
}

/// Work queues can be referenced from multiple threads, and thus are Send and Sync.
unsafe impl Send for WorkQueue {}
unsafe impl Sync for WorkQueue {}

impl Drop for WorkQueue {
    fn drop(&mut self) {
        panic!("WorkQueues must not be dropped");
    }
}

/// A simple mapping to get the current work_queue from the currently running thread.
///
/// This assumes that Zephyr's work queues have a 1:1 mapping between the work queue and the
/// thread.
///
/// # Safety
///
/// The work queue is protected with a sync Mutex (which uses an underlying Zephyr mutex).  It is,
/// in general, not a good idea to use a mutex in a work queue, as deadlock can happen.  So it is
/// important to both never .await while holding the lock, as well as to make sure operations within
/// it are relatively fast.  In this case, `insert` and `get` on the SimpleTls are reasonably fast.
/// `insert` is usually done just at startup as well.
///
/// This is a little bit messy as we don't have a lazy mechanism, so we have to handle this a bit
/// manually right now.
static WORK_QUEUES: StaticTls<WorkQueueRef> = StaticTls::new();

/// For the queue mapping, we need a simple wrapper around the underlying pointer, one that doesn't
/// implement stop.
#[derive(Copy, Clone)]
struct WorkQueueRef(*mut k_work_q);

// SAFETY: The work queue reference is also safe for both Send and Sync per Zephyr semantics.
unsafe impl Send for WorkQueueRef {}
unsafe impl Sync for WorkQueueRef {}

/// Retrieve the current work queue, if we are running within one.
pub fn get_current_workq() -> Option<*mut k_work_q> {
    WORK_QUEUES.get().map(|wq| wq.0)
}

/// A Rust wrapper for `k_poll_signal`.
///
/// A signal in Zephyr is an event mechanism that can be used to trigger actions in event queues to
/// run.  They work somewhat like a kind of half-boolean semaphore.  The signaling is robust in the
/// direction of the event happening, as in a blocked task will definitely wake when the signal
/// happens.  However, the clearing of the signal is racy.  Generally, there are two ways to do
/// this:
///
/// - A work action can clear the signal as soon as it wakes up, before it starts processing any
///   data the signal was meant to indicate.  If the race happens, the processing will handle the
///   extra data.
/// - A work action can clear the signal after it does its processing.  This is useful for things
///   like periodic timers, where if it is still processing when an additional timer tick comes in,
///   that timer tick will be ignored.  This is useful for periodic events where it is better to
///   just skip a tick rather than for them to "stack up" and get behind.
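///
/// As a sketch of the first pattern (a hypothetical `process` function; the signal is assumed to
/// be shared with whatever raises it):
///
/// ```rust,ignore
/// // In the async worker:
/// let result = signal.wait_async(Forever).await?;
/// // Clear before processing; if a racing `raise` slips in, the worker will
/// // simply be woken again and handle the extra data then.
/// signal.reset();
/// process(result);
/// ```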
+/// +/// Notably, as long as the `reset` method is only ever called by the worker that is waiting upon +/// it, there shouldn't ever be a race in the `wait_async` itself. +/// +/// Signals can pass a `c_int` from the signalling task to the task that is waiting for the signal. +/// It is not specified in the Zephyr documentation what value will be passed if `raise` is called +/// multiple times before a task waits upon a signal. The current implementation will return the +/// most recent raised `result` value. +/// +/// For most other use cases, channels or semaphores are likely to be better solutions. +pub struct Signal { + /// The raw Zephyr `k_poll_signal`. + pub(crate) item: Fixed, +} + +// SAFETY: Zephyr's API maintains thread safety. +unsafe impl Send for Signal {} +unsafe impl Sync for Signal {} + +impl Signal { + /// Create a new `Signal`. + /// + /// The Signal will be in the non-signaled state. + pub fn new() -> Result { + // SAFETY: The memory is zero initialized, and Fixed ensure that it never changes address. + let item: Fixed = Fixed::new(unsafe { mem::zeroed() }); + unsafe { + k_poll_signal_init(item.get()); + } + Ok(Signal { item }) + } + + /// Reset the Signal + /// + /// This resets the signal state to unsignaled. + /// + /// Please see the [`Signal`] documentation on how to handle the races that this implies. + pub fn reset(&self) { + // SAFETY: This is safe with a non-mut reference, as the purpose of the Zephyr API is to + // coordinate this information between threads. + unsafe { + k_poll_signal_reset(self.item.get()); + } + } + + /// Check the status of a signal. + /// + /// This reads the status of the signal. If the state is "signalled", this will return + /// `Some(result)` where the `result` is the result value given to [`raise`]. + /// + /// [`raise`]: Self::raise + pub fn check(&self) -> Option { + let mut signaled: c_uint = 0; + let mut result: c_int = 0; + unsafe { + // SAFETY: Zephyr's signal API coordinates access across threads. + k_poll_signal_check(self.item.get(), &mut signaled, &mut result); + } + + if signaled != 0 { + Some(result) + } else { + None + } + } + + /// Signal a signal object. + /// + /// This will signal to any worker that is waiting on this object that the event has happened. + /// The `result` will be returned from the worker's `wait` call. + /// + /// As per the Zephyr docs, this could return an EAGAIN error if the polling thread is in the + /// process of expiring. The implication is that the signal will not be raised in this case. + /// ... + pub fn raise(&self, result: c_int) -> crate::Result<()> { + to_result_void(unsafe { k_poll_signal_raise(self.item.get(), result) }) + } + + /// Asynchronously wait for a signal to be signaled. + /// + /// If the signal has not been raised, will wait until it has been. If the signal has been + /// raised, the Future will immediately return that value without waiting. + /// + /// **Note**: there is no sync wait, as Zephyr does not provide a convenient mechanmism for + /// this. It could be implemented with `k_poll` if needed. + pub fn wait_async<'a>( + &'a self, + timeout: impl Into, + ) -> impl Future> + 'a { + SignalWait { + signal: self, + timeout: timeout.into(), + ran: false, + } + } +} + +impl Default for Signal { + fn default() -> Self { + Signal::new().unwrap() + } +} + +/// The Future for Signal::wait_async. +struct SignalWait<'a> { + /// The signal we are waiting on. + signal: &'a Signal, + /// The timeout to use. 
+ timeout: Timeout, + /// Set after we've waited once, + ran: bool, +} + +impl<'a> Future for SignalWait<'a> { + type Output = crate::Result; + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut core::task::Context<'_>, + ) -> core::task::Poll { + // We can check if the even happened immediately, and avoid blocking if we were already + // signaled. + if let Some(result) = self.signal.check() { + return Poll::Ready(Ok(result)); + } + + if self.ran { + // If it is not ready, assuming a timeout. Note that if a thread other than this work + // thread resets the signal, it is possible to see a timeout even if `Forever` was given + // as the timeout. + return Poll::Ready(Err(crate::Error(ETIMEDOUT))); + } + + cx.add_signal(self.signal, self.timeout); + self.ran = true; + + Poll::Pending + } +} + +/// Possible returns from work queue submission. +#[derive(Debug, Clone, Copy)] +pub enum SubmitResult { + /// This work was already in a queue. + AlreadySubmitted, + /// The work has been added to the specified queue. + Enqueued, + /// The queue was called from the worker itself, and has been queued to the queue that was + /// running it. + WasRunning, +} + +impl SubmitResult { + /// Does this result indicate that the work was enqueued? + pub fn enqueued(self) -> bool { + matches!(self, Self::Enqueued | Self::WasRunning) + } + + /// Convert an int result from a work submit function. + fn to_result(value: c_int) -> crate::Result { + crate::error::to_result(value).map(|code| match code { + 0 => Self::AlreadySubmitted, + 1 => Self::Enqueued, + 2 => Self::WasRunning, + _ => panic!("Unexpected result {} from Zephyr work submission", code), + }) + } +} + +/// A simple action that just does something with its data. +/// +/// This is similar to a Future, except there is no concept of it completing. It manages its +/// associated data however it wishes, and is responsible for re-queuing as needed. +pub trait SimpleAction { + /// Perform the action. + fn act(self: Pin<&mut Self>); +} + +/// A basic Zephyr work item. +/// +/// Holds a `k_work`, along with the data associated with that work. When the work is queued, the +/// `act` method will be called on the provided `SimpleAction`. +#[repr(C)] +pub struct Work { + work: k_work, + action: T, +} + +impl> Work { + /// Construct a new Work from the given action. + /// + /// Note that the data will be moved into the pinned Work. The data is internal, and only + /// accessible to the work thread (the `act` method). If shared data is needed, normal + /// inter-thread sharing mechanisms are needed. + /// + /// TODO: Can we come up with a way to allow sharing on the same worker using Rc instead of Arc? + pub fn new(action: T) -> Pin> { + let mut this = Box::pin(Self { + // SAFETY: will be initialized below, after this is pinned. + work: unsafe { mem::zeroed() }, + action, + }); + let ptr = this.as_mut().as_k_work(); + // SAFETY: Initializes the zero allocated struct. + unsafe { + k_work_init(ptr, Some(Self::handler)); + } + + this + } + + /// Submit this work to the system work queue. + /// + /// This can return several possible `Ok` results. See the docs on [`SubmitResult`] for an + /// explanation of them. + pub fn submit(self: Pin<&mut Self>) -> crate::Result { + // SAFETY: The Pin ensures this will not move. Our implementation of drop ensures that the + // work item is no longer queued when the data is dropped. + SubmitResult::to_result(unsafe { k_work_submit(self.as_k_work()) }) + } + + /// Submit this work to a specified work queue. 
    ///
    /// TODO: Change when we have better wrappers for work queues.
    pub fn submit_to_queue(
        self: Pin<&mut Self>,
        queue: *mut k_work_q,
    ) -> crate::Result<SubmitResult> {
        // SAFETY: The Pin ensures this will not move.  Our implementation of drop ensures that the
        // work item is no longer queued when the data is dropped.
        SubmitResult::to_result(unsafe { k_work_submit_to_queue(queue, self.as_k_work()) })
    }

    /// Get the pointer to the underlying work queue.
    fn as_k_work(self: Pin<&mut Self>) -> *mut k_work {
        // SAFETY: This is private, and no code here will move the pinned item.
        unsafe { self.map_unchecked_mut(|s| &mut s.work).get_unchecked_mut() }
    }

    /// Get a pointer into our action.
    fn as_action(self: Pin<&mut Self>) -> Pin<&mut T> {
        // SAFETY: We rely on the worker itself not moving the data.
        unsafe { self.map_unchecked_mut(|s| &mut s.action) }
    }

    /// Callback, through C, but bound by a specific type.
    extern "C" fn handler(work: *mut k_work) {
        // SAFETY: We rely on repr(C) placing the first field of a struct at the same address as the
        // struct.  This avoids needing a Rust equivalent to `CONTAINER_OF`.
        let this: Pin<&mut Self> = unsafe { Pin::new_unchecked(&mut *(work as *mut Self)) };
        this.as_action().act();
    }
}
diff --git a/zephyr/src/work/futures.rs b/zephyr/src/work/futures.rs
new file mode 100644
index 00000000..d70cf2fb
--- /dev/null
+++ b/zephyr/src/work/futures.rs
@@ -0,0 +1,615 @@
//! Zephyr work wrappers targeted for the `Future` type.
//!
//! The future is similar to our [`SimpleAction`], with a few additional features:
//! - The poll function returns an enum indicating that either it can be suspended, or that it
//!   is finished and has a result.
//! - The poll function takes a `Waker` which is used to "wake" the work item.
//!
//! However, there is a bit of a semantic mismatch between work queues and Futures.  Futures are
//! effectively built with the assumption that the waking will happen, by Rust code, at the
//! time the event is ready.  However, work queues expect the work to be queued immediately,
//! with a "poll" indicating what kind of event will wake the work.  Work will be scheduled
//! either based on one of these events, or a timeout.
//!
//! [`SimpleAction`]: super::SimpleAction

extern crate alloc;

use alloc::boxed::Box;

use core::{
    cell::UnsafeCell,
    ffi::{c_int, c_void, CStr},
    future::Future,
    mem,
    pin::Pin,
    ptr::{self, NonNull},
    task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
};

use arrayvec::ArrayVec;
use zephyr_sys::{
    k_poll_event, k_poll_event_init, k_poll_modes_K_POLL_MODE_NOTIFY_ONLY, k_work, k_work_poll,
    k_work_poll_init, k_work_poll_submit, k_work_poll_submit_to_queue, k_work_q,
    ZR_POLL_TYPE_DATA_AVAILABLE, ZR_POLL_TYPE_SEM_AVAILABLE, ZR_POLL_TYPE_SIGNAL,
};

use crate::{
    printkln,
    sync::{Arc, Mutex, Weak},
    sys::{queue::Queue, sync::Semaphore},
    time::{Duration, Forever, NoWait, Tick, Timeout},
};

use super::{get_current_workq, Signal, SubmitResult, WorkQueue};

/// An answer to a completed Future.
///
/// There are two times we need to wait on a future running to completion: the outer initial
/// executor invocation from the main thread, and running an async thread which will have a join
/// method.
///
/// For both cases, we will use a Semaphore to indicate when the data is available.
///
/// The main issue is that this type is intended to be one shot.
Trying to load a second value will +/// invalidate the data structure (the item will be replaced, but there is a race with the +/// semaphore). +/// +/// TODO: Currently, the data is stored inside of a Mutex. This isn't actually necessary (the +/// semaphore already manages the coordination), and only a memory barrier would be needed, which +/// would be provided by the semaphore. So, this should be changed to just unsafely share the data, +/// similar to how a mutex is implemented. +pub struct Answer { + item: Mutex>, + wake: Semaphore, +} + +impl Answer { + /// Construct a new Answer that does not have the result. + pub fn new() -> Self { + Self { + item: Mutex::new(None), + wake: Semaphore::new(0, 1).expect("Initialize semaphore"), + } + } + + /// Place the item into the Answer. + /// + /// # Panic + /// + /// If the answer already contains an item, this will panic. + /// + /// # TODO + /// + /// We could check that the Answer has ever been used, not just that it has an answer in it. + pub fn place(&self, item: T) { + let mut inner = self.item.lock().expect("Get Mutex"); + if inner.is_some() { + panic!("Answer already contains a value"); + } + *inner = Some(item); + self.wake.give(); + } + + /// Synchronously wait for an Answer. + /// + /// Blocks the current thread until an answer is available, returning it. + pub fn take(&self) -> T { + self.wake.take(Forever).expect("Forever returned early"); + self.item + .lock() + .expect("Get Mutex") + .take() + .expect("Answer should contain value") + } + + /// Asynchronously wait for an answer. + pub async fn take_async(&self) -> T { + self.wake + .take_async(Forever) + .await + .expect("Forever returnd early"); + self.item + .lock() + .expect("Get Mutex") + .take() + .expect("Answer should contain value") + } +} + +/// Build a combiner for Future and a Zephyr work queue. This encapsulates the idea of starting +/// a new thread of work, and is the basis of both the main `run` for work queues, as well as +/// any calls to spawn that happen within the Future world. +pub struct WorkBuilder { + queue: Option>, + // A name for this task, used by debugging and such. + name: Option<&'static CStr>, +} + +impl WorkBuilder { + /// Construct a new builder for work. + /// + /// The builder will default to running on the system workqueue. + pub fn new() -> Self { + Self { + queue: None, + name: None, + } + } + + /// Set the work queue for this worker to run on. + /// + /// By default, A Worker will run on the system work-queue. + pub fn set_worker(&mut self, worker: &WorkQueue) -> &mut Self { + self.queue = Some(NonNull::new(worker.item.get()).expect("work must not be null")); + self + } + + /// Set a name for this worker, for debugging. + pub fn set_name(&mut self, name: &'static CStr) -> &mut Self { + self.name = Some(name); + self + } + + /// Start this working, consuming the given Future to do the work. + /// + /// The work queue is in a pinned Arc to meet requirements of how Futures are used. The Arc + /// maintains lifetime while the worker is running. See notes below for issues of lifetimes + /// and canceled work. + pub fn start(&self, future: F) -> JoinHandle { + JoinHandle::new(self, future) + } + + /// Start this work, locally running on the current worker thread. + /// + /// This is the same as `start`, but the work will always be started on the current work queue + /// thread. This relaxes the `Send` requirement, as the data will always be contained in a + /// single thread. 
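    ///
    /// A sketch of intended use from within a running worker (`local_fut` is a hypothetical
    /// future defined elsewhere):
    ///
    /// ```rust,ignore
    /// // `local_fut` may be !Send, since it never leaves this work queue thread.
    /// let handle = WorkBuilder::new()
    ///     .set_name(c"w:local")
    ///     .start_local(local_fut);
    /// let result = handle.join_async().await;
    /// ```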
+ /// + /// # Panics + /// + /// If called from other than a Future running on a work queue, will panic. The System work + /// queue is not yet supported. + pub fn start_local(&self, future: F) -> JoinHandle { + JoinHandle::new_local(self, future) + } +} + +/// A potentially running Work. +/// +/// This encapsulates a Future that is potentially running in the Zephyr work queue system. +/// +/// # Safety +/// +/// Once the worker has been started (meaning once WorkBuilder::start returns this `Work`), all +/// but one field here is owned by the worker itself (it runs on the worker thread, hence the +/// Send constraint). The exception is the 'answer' field which can be used by the caller to +/// wait for the Work to finish. +pub struct JoinHandle { + /// The answer will be placed here. This Arc holds a strong reference, and if the spawning + /// thread doesn't hold the `Work`, it will be dropped. + answer: Arc>, +} + +// SAFETY: The join handle can be Send as long as the Output is send. It does not depend on the +// Future being send. +unsafe impl Send for JoinHandle +where + F: Future, + F::Output: Send +{ +} + +impl JoinHandle { + /// Construct new [`JoinHandle`] that runs on a specified [`WorkQueue`]. + fn new(builder: &WorkBuilder, future: F) -> Self { + // Answer holds the result when the work finishes. + let answer = Arc::new(Answer::new()); + + let work = WorkData::new( + future, + Arc::downgrade(&answer), + builder.queue, + builder.name, + ); + WorkData::submit(work).expect("Unable to enqueue worker"); + + Self { answer } + } +} + +impl JoinHandle { + /// Construct a new [`JoinHandle`] that runs on the current [`WorkQueue`]. + /// + /// # Panics + /// + /// If `new_local` is called from a context other than running within a worker defined in this + /// crate, it will panic. + /// + /// Note that currently, the system workq is not considered a worked defined in this crate. + fn new_local(builder: &WorkBuilder, future: F) -> Self { + let workq = get_current_workq().expect("Called new_local not from worker"); + let answer = Arc::new(Answer::new()); + + let work = WorkData::new( + future, + Arc::downgrade(&answer), + Some(NonNull::new(workq).unwrap()), + builder.name, + ); + WorkData::submit(work).expect("Unable to enqueue worker"); + + Self { answer } + } +} + +impl JoinHandle { + /// Synchronously wait for this future to have an answer. + pub fn join(&self) -> F::Output { + self.answer.take() + } + + /// Asynchronously wait for this future to have an answer. + pub async fn join_async(&self) -> F::Output { + self.answer.take_async().await + } +} + +/// Futures will need to be able to set the events and timeout of this waker. Because the Waker is +/// parameterized, they will not have access to the whole WorkWaker, but only this WakeInfo. +pub struct WakeInfo { + /// The work queue to submit this work to. None indicates the system workq. + pub(crate) queue: Option>, + /// Events to use for our next wakeup. Currently cleared before calling the future (although + /// this discards the wakeup reason, so needs to be fixed). + pub events: EventArray, + /// Timeout to use for the next wakeup. Will be set to Forever before calling the Future's + /// poll. + pub timeout: Timeout, + /// A Context to use for invoking workers. This `WakeInfo` can be recovered from this context. + /// Note that our contexts are `'static` as they are maintained inside of the worker. + pub context: Context<'static>, +} + +impl WakeInfo { + /// Recover the WakeInfo from a given context. 
    ///
    /// # Safety
    ///
    /// Although the lifetime of Context is `'static`, the generic type passed to `Future` does not
    /// specify a lifetime.  As such, it is not possible for the future to store the Context, and
    /// rescheduling must be specified before this Future invocation returns.
    ///
    /// This does assume we are only using the Zephyr scheduler.  The Context does have an any-based
    /// data pointer mechanism, but it is nightly.  This recovery would be easier using that
    /// mechanism.
    pub unsafe fn from_context<'b>(context: &'b mut Context) -> &'b mut Self {
        // SAFETY: We're doing pointer arithmetic to recover Self from a reference to the embedded
        // context.  The 'mut' is preserved to keep the rules of mut in Rust.
        unsafe {
            let this: *mut Context = context;
            let this = this
                .cast::<u8>()
                .sub(mem::offset_of!(Self, context))
                .cast::<Self>();
            &mut *this
        }
    }

    /// Add an event that represents waiting for a semaphore to be available for "take".
    pub fn add_semaphore<'a>(&'a mut self, sem: &'a Semaphore) {
        // SAFETY: Fill with zeroed memory; initialization happens in the init function next.
        self.events.push(unsafe { mem::zeroed() });
        let ev = self.events.last().unwrap();

        unsafe {
            k_poll_event_init(
                ev.get(),
                ZR_POLL_TYPE_SEM_AVAILABLE,
                k_poll_modes_K_POLL_MODE_NOTIFY_ONLY as i32,
                sem.item.get() as *mut c_void,
            );
        }
    }

    /// Add an event that represents waiting for a signal.
    pub fn add_signal<'a>(&'a mut self, signal: &'a Signal) {
        // SAFETY: Fill with zeroed memory; initialization happens in the init function next.
        self.events.push(unsafe { mem::zeroed() });
        let ev = self.events.last().unwrap();

        unsafe {
            k_poll_event_init(
                ev.get(),
                ZR_POLL_TYPE_SIGNAL,
                k_poll_modes_K_POLL_MODE_NOTIFY_ONLY as i32,
                signal.item.get() as *mut c_void,
            );
        }
    }

    /// Add an event that represents waiting for a queue to have a message.
    pub fn add_queue<'a>(&'a mut self, queue: &'a Queue) {
        // SAFETY: Fill with zeroed memory; initialization happens in the init function next.
        self.events.push(unsafe { mem::zeroed() });
        let ev = self.events.last().unwrap();

        unsafe {
            k_poll_event_init(
                ev.get(),
                ZR_POLL_TYPE_DATA_AVAILABLE,
                k_poll_modes_K_POLL_MODE_NOTIFY_ONLY as i32,
                queue.item.get() as *mut c_void,
            );
        }
    }
}

/// The worker-owned information about that worker.
///
/// This holds a single worker, and will be owned by that worker itself.
struct WorkData<F: Future> {
    /// Info needed to reschedule the work.
    info: WakeInfo,
    /// The Zephyr worker.  This struct is allocated in a Box, and only used by the worker thread,
    /// so it is easy to recover.  The UnsafeCell is to indicate that Zephyr is free to mutate the
    /// work.
    work: UnsafeCell<k_work_poll>,
    /// Where the answer is placed.  This is weak because the spawning thread may not be interested
    /// in the result, which will drop the only reference to the Arc, breaking the weak reference.
    answer: Weak<Answer<F::Output>>,
    /// The future that is running this work.
    future: F,
}

// SAFETY: The worker struct is explicitly safe to send by the Zephyr docs.
// unsafe impl<F: Future> Send for WorkData<F> {}

impl<F: Future> WorkData<F> {
    /// Build a new WorkWaker around the given future.  The weak reference to the answer is where
    /// the answer is stored if the task spawner is still interested in the answer.
+ fn new( + future: F, + answer: Weak>, + queue: Option>, + name: Option<&'static CStr>, + ) -> Pin> { + // name is only used for SystemView debugging, so prevent a warning when that is not + // enabled. + let _ = name; + + let this = Box::pin(Self { + // SAFETY: This will be initialized below, once the Box allocates and the memory won't + // move. + work: unsafe { mem::zeroed() }, + future, + answer, + info: WakeInfo { + queue, + events: EventArray::new(), + // Initial timeout is NoWait so work starts as soon as submitted. + timeout: NoWait.into(), + context: Context::from_waker(&VOID_WAKER), + }, + }); + + unsafe { + // SAFETY: The above Arc allocates the worker. The code here is careful to not move it. + k_work_poll_init(this.work.get(), Some(Self::handler)); + + // If we have a name, send it to Segger. + #[cfg(CONFIG_SEGGER_SYSTEMVIEW)] + { + if let Some(name) = name { + let info = crate::raw::SEGGER_SYSVIEW_TASKINFO { + TaskID: this.work.get() as ::core::ffi::c_ulong, + sName: name.as_ptr(), + Prio: 1, + StackBase: 0, + StackSize: 32, + }; + crate::raw::SEGGER_SYSVIEW_OnTaskCreate(this.work.get() as ::core::ffi::c_ulong); + crate::raw::SEGGER_SYSVIEW_SendTaskInfo(&info); + } + } + } + + this + } + + /// Submit this work to the Zephyr work queue. This consumes the Box, with the primary owner + /// being the work thread itself. Not that canceling work will leak the worker. + fn submit(mut this: Pin>) -> crate::Result { + // SAFETY: This is unsafe because the pointer lose the Pin guarantee, but C code will not + // move it. + let this_ref = unsafe { + Pin::get_unchecked_mut(this.as_mut()) + }; + + let result = if let Some(queue) = this_ref.info.queue { + unsafe { + // SAFETY: We're transferring ownership of the box to the enqueued work. For + // regular re-submission as the worker runs, the worker won't be run until this + // method exits. For initial creation, there is a possible period where our + // reference here survives while the worker is schedule (when the work queue is + // higher priority than this. I'm not sure if this fully followes the rules, as + // there is still a reference to this here, but as long as we only use it to leak + // the box, I believe we are safe. If this is deemed unsafe, these values could be + // copied to variables and the box leaked before we enqueue. + k_work_poll_submit_to_queue( + queue.as_ptr(), + this_ref.work.get(), + this_ref.info.events.as_mut_ptr() as *mut k_poll_event, + this.info.events.len() as c_int, + this.info.timeout.0, + ) + } + } else { + unsafe { + // SAFETY: See above, safety here is the same. + k_work_poll_submit( + this_ref.work.get(), + this_ref.info.events.as_mut_ptr() as *mut k_poll_event, + this_ref.info.events.len() as c_int, + this_ref.info.timeout.0, + ) + } + }; + + // The Box has been handed to C. Consume the box, leaking the value. We use `into_raw` as + // it is the raw pointer we will be recovering the Box with when the worker runs. + let _ = Self::into_raw(this); + + match result { + 0 => Ok(SubmitResult::Enqueued), + code => panic!("Unexpected result from work poll submit: {}", code), + } + } + + /// The work callback, coming from the Zephyr C world. The box was into_raw(), We recover the + /// WorkWaker by using container_of and recovering it back into a box, which we will leak when + /// we re-submit it. + extern "C" fn handler(work: *mut k_work) { + // Note that we want to avoid needing a `repr(C)` on our struct, so the k_work pointer is + // not necessarily at the beginning of the struct. 
+        let mut this = unsafe { Self::from_raw(work) };
+
+        let this_ref = unsafe {
+            Pin::get_unchecked_mut(this.as_mut())
+        };
+
+        // Set the next wakeup to Forever, with no events. TODO: This prevents the next poll from
+        // being able to determine the reason for the wakeup.
+        this_ref.info.events.clear();
+        this_ref.info.timeout = Forever.into();
+
+        // SAFETY: poll requires the pointer to be pinned, in case that is needed. We rely on the
+        // Boxing of the pointer, and that our code does not move the future.
+        let future = unsafe { Pin::new_unchecked(&mut this_ref.future) };
+        #[cfg(CONFIG_SEGGER_SYSTEMVIEW)]
+        unsafe {
+            crate::raw::SEGGER_SYSVIEW_OnTaskStartExec(work as u32);
+        }
+        match future.poll(&mut this_ref.info.context) {
+            Poll::Pending => {
+                #[cfg(CONFIG_SEGGER_SYSTEMVIEW)]
+                unsafe {
+                    crate::raw::SEGGER_SYSVIEW_OnTaskStopExec();
+                }
+                // While pending, use the timeout and events to schedule ourselves to do more
+                // work. TODO: If we want to support a real Waker, this would need to detect
+                // that, and schedule a possible wake in this no-wake case.
+                // Currently, this check only tests that something was missed, and is really
+                // more of a debug assertion.
+                if this.info.events.is_empty() && this.info.timeout == Forever.into() {
+                    printkln!("Warning: worker scheduled to never wake up");
+                }
+
+                // The re-submission will give ownership of the box back to the scheduled work.
+                Self::submit(this).expect("Unable to schedule work");
+            }
+            Poll::Ready(answer) => {
+                #[cfg(CONFIG_SEGGER_SYSTEMVIEW)]
+                unsafe {
+                    crate::raw::SEGGER_SYSVIEW_OnTaskStopExec();
+                }
+                // TODO: Delete the task as well.
+                // If the spawning task is still interested in the answer, provide it.
+                if let Some(store) = this.answer.upgrade() {
+                    store.place(answer);
+                }
+
+                // Work is finished, so allow the Box to be dropped.
+            }
+        }
+    }
+
+    /// Consume the pinned box containing Self, and return the internal pointer.
+    fn into_raw(this: Pin<Box<Self>>) -> *mut Self {
+        // SAFETY: This removes the Pin guarantee, but the raw pointer is handed to C, which will
+        // not move the value.
+        let this = unsafe { Pin::into_inner_unchecked(this) };
+        Box::into_raw(this)
+    }
+
+    /// Given a pointer to the `k_work` buried within, recover the pinned Box containing our data.
+    unsafe fn from_raw(ptr: *mut k_work) -> Pin<Box<Self>> {
+        // SAFETY: This fixes the pointer back to the beginning of Self. This also assumes the
+        // pointer is valid.
+        let ptr = ptr
+            .cast::<u8>()
+            .sub(mem::offset_of!(k_work_poll, work))
+            .sub(mem::offset_of!(Self, work))
+            .cast::<Self>();
+        let this = Box::from_raw(ptr);
+        Pin::new_unchecked(this)
+    }
+}
+
+/// A void waker is used when we don't use the Waker mechanism. There is no data associated with
+/// this waker, and it panics if anyone tries to clone it or use it to wake a task.
+/// This is static to simplify lifetimes.
+static VOID_WAKER: Waker = unsafe {
+    Waker::from_raw(RawWaker::new(
+        ptr::null(),
+        &RawWakerVTable::new(void_clone, void_wake, void_wake_by_ref, void_drop),
+    ))
+};
+
+/// Void clone operation. Panics for now. If we want to implement a real waker, this will need to
+/// be managed.
+unsafe fn void_clone(_: *const ()) -> RawWaker {
+    panic!("Zephyr Wakers not yet supported for general 'Waker' use");
+}
+
+/// Void wake operation. Panics for now. If we want to implement a real waker, this will need to
+/// be managed.
+unsafe fn void_wake(_: *const ()) {
+    panic!("Zephyr Wakers not yet supported for general 'Waker' use");
+}
+
+/// Void wake_by_ref operation. Panics for now. If we want to implement a real waker, this will
+/// need to be managed.
+unsafe fn void_wake_by_ref(_: *const ()) {
+    panic!("Zephyr Wakers not yet supported for general 'Waker' use");
+}
+
+/// The void drop will be called when the Context is dropped after the first invocation. Because
+/// the clone above panics, we know there are no references hanging around, so it is safe to do
+/// nothing.
+unsafe fn void_drop(_: *const ()) {}
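+
+// Sketch of what a real waker might look like (hypothetical; none of this is implemented): the
+// RawWaker data pointer would point at the owning WorkData instead of being null, clone would
+// manage a reference count, and wake would re-submit the work so that poll runs again:
+//
+//     unsafe fn wake(data: *const ()) {
+//         let this = recover_work_data(data);    // hypothetical container_of-style recovery
+//         WorkData::submit(this).expect("resubmit on wake");
+//     }
+//
+// This is why the TODOs above talk about detecting Waker use during `poll`.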
+
+/// To avoid having to parameterize everything, we limit the size of the ArrayVec of events to
+/// this amount. The amount needed here depends on overall use, but so far, 1 is sufficient.
+type EventArray = ArrayVec<UnsafeCell<k_poll_event>, 1>;
+
+/// Async sleep.
+pub fn sleep(duration: Duration) -> Sleep {
+    Sleep {
+        ticks_left: duration.ticks(),
+    }
+}
+
+/// A future that sleeps for a while.
+pub struct Sleep {
+    // How much time is left. TODO: Change this into an absolute sleep once we have the ability
+    // to determine why we were scheduled.
+    ticks_left: Tick,
+}
+
+impl Future for Sleep {
+    type Output = ();
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        // If the sleep is done, so are we.
+        if self.ticks_left == 0 {
+            return Poll::Ready(());
+        }
+
+        // Otherwise, queue ourselves back up.
+        let this = unsafe { WakeInfo::from_context(cx) };
+
+        this.timeout = Duration::from_ticks(self.ticks_left).into();
+        self.ticks_left = 0;
+
+        Poll::Pending
+    }
+}
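+
+// Usage sketch for `sleep` (illustrative; `spawn` and `do_work` stand in for whatever front end
+// submits a future as WorkData, and are assumptions, not APIs defined in this file):
+//
+//     spawn(async {
+//         loop {
+//             do_work();
+//             sleep(Duration::millis_at_least(500)).await;
+//         }
+//     });
+//
+// Because poll() records the timeout in the recovered WakeInfo and returns Pending exactly once,
+// the worker is re-submitted with that timeout and the future completes on its next invocation.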