//! `esp-idf-hal` — interrupt allocation flags, ISR helpers and ISR-safe
//! critical sections / notifications (file: esp_idf_hal/interrupt.rs).

1use enumset::{EnumSet, EnumSetType};
2
3use esp_idf_sys::*;
4
/// For backwards compatibility.
/// Alias kept for code written against the old `IntrFlags` name;
/// prefer [`InterruptType`] in new code.
pub type IntrFlags = InterruptType;
7
/// Interrupt allocation flags.
/// These flags can be used to specify which interrupt qualities the code calling esp_intr_alloc* needs.
#[derive(Debug, EnumSetType)]
pub enum InterruptType {
    /// Accept a Level 1 interrupt vector (lowest priority)
    Level1,
    /// Accept a Level 2 interrupt vector.
    Level2,
    /// Accept a Level 3 interrupt vector.
    Level3,
    /// Accept a Level 4 interrupt vector.
    Level4,
    /// Accept a Level 5 interrupt vector.
    Level5,
    /// Accept a Level 6 interrupt vector.
    Level6,
    /// Accept a Level 7 interrupt vector (highest priority)
    Nmi,
    /// Interrupt can be shared between ISRs.
    Shared,
    /// Edge-triggered interrupt.
    Edge,
    /// ISR can be called if cache is disabled.
    /// Must be used with a proper option *_ISR_IN_IRAM in SDKCONFIG
    Iram,
    /// Return with this interrupt disabled.
    IntrDisabled,
    /// Low and medium prio interrupts. These can be handled in C.
    LowMed,
    /// High level interrupts. Need to be handled in assembly.
    High,
}
40
41impl InterruptType {
42    pub fn levels(&self) -> EnumSet<Self> {
43        Self::Level1
44            | Self::Level2
45            | Self::Level3
46            | Self::Level4
47            | Self::Level5
48            | Self::Level6
49            | Self::Nmi
50    }
51
52    pub(crate) fn to_native(flags: EnumSet<Self>) -> u32 {
53        let mut result = 0;
54
55        for flag in flags {
56            result |= u32::from(flag);
57        }
58
59        result
60    }
61}
62
// Maps each `InterruptType` flag to the corresponding raw `ESP_INTR_FLAG_*`
// constant from the ESP-IDF C headers (one bit per flag).
impl From<InterruptType> for u32 {
    fn from(flag: InterruptType) -> Self {
        match flag {
            InterruptType::Level1 => esp_idf_sys::ESP_INTR_FLAG_LEVEL1,
            InterruptType::Level2 => esp_idf_sys::ESP_INTR_FLAG_LEVEL2,
            InterruptType::Level3 => esp_idf_sys::ESP_INTR_FLAG_LEVEL3,
            InterruptType::Level4 => esp_idf_sys::ESP_INTR_FLAG_LEVEL4,
            InterruptType::Level5 => esp_idf_sys::ESP_INTR_FLAG_LEVEL5,
            InterruptType::Level6 => esp_idf_sys::ESP_INTR_FLAG_LEVEL6,
            InterruptType::Nmi => esp_idf_sys::ESP_INTR_FLAG_NMI,
            InterruptType::Shared => esp_idf_sys::ESP_INTR_FLAG_SHARED,
            InterruptType::Edge => esp_idf_sys::ESP_INTR_FLAG_EDGE,
            InterruptType::Iram => esp_idf_sys::ESP_INTR_FLAG_IRAM,
            InterruptType::IntrDisabled => esp_idf_sys::ESP_INTR_FLAG_INTRDISABLED,
            InterruptType::LowMed => esp_idf_sys::ESP_INTR_FLAG_LOWMED,
            InterruptType::High => esp_idf_sys::ESP_INTR_FLAG_HIGH,
        }
    }
}
82
// HAL-global critical section, used by `free()` below to run closures with
// interrupts masked on the current core.
pub(crate) static CS: IsrCriticalSection = IsrCriticalSection::new();
84
/// Returns true if the currently active core is executing an ISR request
// Placed in the `.iram1` section; presumably so it remains callable while the
// flash cache is disabled — confirm against the IRAM conventions of this crate.
#[inline(always)]
#[link_section = ".iram1.interrupt_active"]
pub fn active() -> bool {
    // SAFETY: `xPortInIsrContext` is a FreeRTOS port query with no
    // preconditions; non-zero means this core is servicing an interrupt.
    unsafe { xPortInIsrContext() != 0 }
}
91
92pub fn with_isr_yield_signal(cb: impl FnOnce()) -> bool {
93    if !active() {
94        panic!("with_isr_yield_signal() can only be called from an ISR context");
95    }
96
97    let mut signaled = false;
98
99    let prev_yielder =
100        unsafe { set_isr_yielder(Some((do_yield_signal, &mut signaled as *mut _ as _))) };
101
102    cb();
103
104    unsafe { set_isr_yielder(prev_yielder) };
105
106    signaled
107}
108
/// Yield callback used by `with_isr_yield_signal`: marks the `bool` behind
/// the type-erased pointer as signaled.
unsafe fn do_yield_signal(arg: *mut ()) {
    // Recover the `&mut bool` smuggled through `*mut ()`; `unwrap` keeps the
    // original behavior of panicking on a null pointer.
    *arg.cast::<bool>().as_mut().unwrap() = true;
}
114
// The currently installed ISR yield callback and its context pointer, if any.
// NOTE(review): `static mut` here relies on every access going through
// `free()` (interrupts masked) in the accessor functions below — keep it that way.
#[allow(clippy::type_complexity)]
static mut ISR_YIELDER: Option<(unsafe fn(*mut ()), *mut ())> = None;
117
118#[allow(clippy::type_complexity)]
119#[inline(always)]
120#[link_section = ".iram1.interrupt_get_isr_yielder"]
121pub(crate) unsafe fn get_isr_yielder() -> Option<(unsafe fn(*mut ()), *mut ())> {
122    if active() {
123        free(|| {
124            if let Some((func, arg)) = unsafe { ISR_YIELDER } {
125                Some((func, arg))
126            } else {
127                None
128            }
129        })
130    } else {
131        None
132    }
133}
134
/// # Safety
///
/// This function should only be called from within an ISR handler, so as to set
/// a custom ISR yield function (e.g. when using the ESP-IDF timer service).
///
/// Thus, if some function further down the ISR call chain invokes `do_yield`,
/// the custom yield function set here will be called.
///
/// Users should not forget to call again `set_isr_yielder` at the end of the
/// ISR handler so as to restore the yield function which was valid before the
/// ISR handler was invoked.
#[allow(clippy::type_complexity)]
#[inline(always)]
#[link_section = ".iram1.interrupt_set_isr_yielder"]
pub unsafe fn set_isr_yielder(
    yielder: Option<(unsafe fn(*mut ()), *mut ())>,
) -> Option<(unsafe fn(*mut ()), *mut ())> {
    // Only meaningful inside an ISR; outside one this is a no-op returning `None`.
    if active() {
        free(|| {
            // Swap the global under the critical section and hand the previous
            // yielder back so the caller can restore it later.
            let previous = unsafe { ISR_YIELDER };

            unsafe { ISR_YIELDER = yielder };

            previous
        })
    } else {
        None
    }
}
164
/// A critical section allows the user to disable interrupts
// Multi-core chips carry a FreeRTOS spinlock (`portMUX_TYPE`) so the section
// can be contended across cores.
#[cfg(any(esp32, esp32s2, esp32s3, esp32p4))]
pub struct IsrCriticalSection(core::cell::UnsafeCell<portMUX_TYPE>);

/// A critical section allows the user to disable interrupts
// Single-core chips need no spinlock state; the raw-pointer `PhantomData`
// keeps the type `!Send`/`!Sync` by default (re-enabled explicitly below).
#[cfg(not(any(esp32, esp32s2, esp32s3, esp32p4)))]
pub struct IsrCriticalSection(core::marker::PhantomData<*const ()>);
171
// Single-core variant: plain FreeRTOS critical section, no per-instance state.
#[cfg(not(any(esp32, esp32s2, esp32s3, esp32p4)))]
#[inline(always)]
#[link_section = ".iram1.interrupt_enter"]
fn enter(_cs: &IsrCriticalSection) {
    unsafe {
        vPortEnterCritical();
    }
}

// Multi-core variant: takes the per-instance spinlock, spinning (with no
// timeout) until the other core releases it.
#[cfg(any(esp32, esp32s2, esp32s3, esp32p4))]
#[inline(always)]
#[link_section = ".iram1.interrupt_enter"]
fn enter(cs: &IsrCriticalSection) {
    unsafe {
        xPortEnterCriticalTimeout(cs.0.get(), portMUX_NO_TIMEOUT);
    }
}
189
// Single-core variant: releases the plain FreeRTOS critical section.
#[cfg(not(any(esp32, esp32s2, esp32s3, esp32p4)))]
#[inline(always)]
#[link_section = ".iram1.interrupt_exit"]
fn exit(_cs: &IsrCriticalSection) {
    unsafe {
        vPortExitCritical();
    }
}

// Multi-core Xtensa variant: releases the per-instance spinlock.
#[cfg(any(esp32, esp32s2, esp32s3))]
#[inline(always)]
#[link_section = ".iram1.interrupt_exit"]
fn exit(cs: &IsrCriticalSection) {
    unsafe {
        vPortExitCritical(cs.0.get());
    }
}

// esp32p4 uses a dedicated multi-core exit entry point in its port layer.
#[cfg(esp32p4)]
#[inline(always)]
#[link_section = ".iram1.interrupt_exit"]
fn exit(cs: &IsrCriticalSection) {
    unsafe {
        vPortExitCriticalMultiCore(cs.0.get());
    }
}
216
impl IsrCriticalSection {
    /// Constructs a new `IsrCriticalSection` instance
    #[inline(always)]
    pub const fn new() -> Self {
        // Multi-core chips: initialize the FreeRTOS spinlock to the
        // "unowned, not nested" state.
        #[cfg(any(esp32, esp32s2, esp32s3, esp32p4))]
        let mux = core::cell::UnsafeCell::new(portMUX_TYPE {
            owner: portMUX_FREE_VAL,
            count: 0,
            // Debug-only bookkeeping fields, present when portMUX debugging
            // is enabled in sdkconfig.
            #[cfg(esp_idf_freertos_portmux_debug)]
            lastLockedFn: b"(never locked)",
            #[cfg(esp_idf_freertos_portmux_debug)]
            lastLockedLine: -1,
        });

        // Single-core chips: no state to carry.
        #[cfg(not(any(esp32, esp32s2, esp32s3, esp32p4)))]
        let mux = core::marker::PhantomData;

        Self(mux)
    }

    /// Disables all interrupts for the lifetime of the returned guard instance.
    /// This method supports nesting in that it is safe to be called multiple times.
    /// This method is also safe to call from ISR routines.
    ///
    /// NOTE: On dual-core esp32* chips, interrupts will be disabled only on one of
    /// the cores (the one where `IsrCriticalSection::enter` is called), while the other
    /// core will continue its execution. Moreover, if the same `IsrCriticalSection` instance
    /// is shared across multiple threads, where some of these happen to be scheduled on
    /// the second core (which has its interrupts enabled), the second core will then spinlock
    /// (busy-wait) in `IsrCriticalSection::enter`, until the first CPU releases the critical
    /// section and re-enables its interrupts. The second core will then - in turn - disable
    /// its interrupts and own the spinlock.
    ///
    /// For more information, refer to https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-guides/freertos-smp.html#critical-sections
    #[inline(always)]
    pub fn enter(&self) -> IsrCriticalSectionGuard<'_> {
        enter(self);

        IsrCriticalSectionGuard(self)
    }
}
258
// `Default` simply delegates to the `const` constructor.
impl Default for IsrCriticalSection {
    #[inline(always)]
    fn default() -> Self {
        Self::new()
    }
}
265
// SAFETY: the inner spinlock/marker is only ever manipulated through the
// FreeRTOS critical-section API above; sharing the wrapper across threads and
// cores is the whole point of the type.
unsafe impl Send for IsrCriticalSection {}
unsafe impl Sync for IsrCriticalSection {}

/// RAII guard returned by [`IsrCriticalSection::enter`]; releases the
/// critical section when dropped.
pub struct IsrCriticalSectionGuard<'a>(&'a IsrCriticalSection);
270
impl Drop for IsrCriticalSectionGuard<'_> {
    /// Drops the critical section guard thus potentially re-enabling
    /// all interrupts for the currently active core.
    ///
    /// Note that - due to the fact that calling `IsrCriticalSection::enter`
    /// multiple times on the same or multiple critical sections is supported -
    /// interrupts for the core will be re-enabled only when the last guard that
    /// disabled interrupts for the concrete core is dropped.
    #[inline(always)]
    fn drop(&mut self) {
        exit(self.0);
    }
}
284
/// Executes closure f in an interrupt-free context
// The guard re-enables interrupts when dropped, even if `f` panics and the
// panic unwinds through this frame.
#[inline(always)]
#[link_section = ".iram1.interrupt_free"]
pub fn free<R>(f: impl FnOnce() -> R) -> R {
    let _guard = CS.enter();

    f()
}
293
// With the `wake-from-isr` feature, the regular task `Notification` is used
// directly. NOTE(review): this assumes `task::asynch::Notification` is safe to
// signal from an ISR when this feature is on — confirm in the task module.
#[cfg(feature = "wake-from-isr")]
pub mod asynch {
    pub type HalIsrNotification = crate::task::asynch::Notification;
}
298
299#[cfg(not(feature = "wake-from-isr"))]
300pub mod asynch {
301    use core::{
302        cell::UnsafeCell,
303        ffi::{c_void, CStr},
304        future::Future,
305        num::NonZeroU32,
306        sync::atomic::{AtomicPtr, Ordering},
307        task::{Context, Poll, Waker},
308    };
309
310    use esp_idf_sys::EspError;
311
312    use ::log::info;
313
314    use crate::{
315        cpu::Core,
316        delay,
317        task::{asynch::Notification, CriticalSection},
318    };
319
320    use super::IsrCriticalSection;
321
    /// The HAL-global wake runner.
    /// You should use no more than 64 tasks with it.
    ///
    /// `*IsrNotification` instances use this wake runner when they are triggered from an ISR context.
    pub static HAL_ISR_REACTOR: IsrReactor<64> = IsrReactor::new(IsrReactorConfig::new());
327
    /// Wake runner configuration
    #[derive(Clone, Debug)]
    pub struct IsrReactorConfig {
        /// Name of the hidden FreeRTOS task that drains the waker queue.
        pub task_name: &'static CStr,
        /// Stack size (in bytes) for that task.
        pub task_stack_size: usize,
        /// FreeRTOS priority of that task.
        pub task_priority: u8,
        /// Optional core to pin the task to; `None` leaves it unpinned.
        pub task_pin_to_core: Option<Core>,
    }
336
    impl IsrReactorConfig {
        /// Default configuration: task "IsrReactor", ~3 KB stack, priority 11,
        /// not pinned to a specific core.
        #[allow(clippy::manual_c_str_literals)]
        pub const fn new() -> Self {
            Self {
                // SAFETY: the byte string is NUL-terminated and contains no
                // interior NUL bytes.
                task_name: unsafe { CStr::from_bytes_with_nul_unchecked(b"IsrReactor\0") },
                task_stack_size: 3084,
                task_priority: 11,
                task_pin_to_core: None,
            }
        }
    }
348
    // `Default` simply delegates to the `const` constructor.
    impl Default for IsrReactorConfig {
        fn default() -> Self {
            Self::new()
        }
    }
354
    /// IsrReactor is a utility allowing `Waker` instances to be awoken from an ISR context.
    ///
    /// General problem:
    /// In an interrupt, using Waker instances coming from generic executors is impossible,
    /// because these are not designed with an ISR-safety in mind.
    ///
    /// Waking a waker means that its task would be scheduled on the executor queue, which might involve
    /// allocation, and/or synchronization primitives which are not safe to use from an ISR context.
    ///
    /// Similarly, dropping a waker might also drop the executor task, resulting in a deallocation, which is also
    /// not safe in an ISR context.
    ///
    /// These problems are alleviated by replacing direct `waker.wake()` calls to `WakerRunner::schedule(waker)`.
    /// What `IsrReactor::schedule` does is to push the waker into a bounded queue and then notify a hidden FreeRTOS task.
    /// Once the FreeRTOS task gets awoken, it wakes all wakers scheduled on the bounded queue and empties the queue.
    pub struct IsrReactor<const N: usize> {
        // Guards `wakers` (interrupts masked while the queue is touched).
        wakers_cs: IsrCriticalSection,
        // Bounded queue of pending wakers; capacity N, no heap allocation.
        wakers: UnsafeCell<heapless::Deque<Waker, N>>,
        // Guards the start/stop transitions of the background task.
        task_cs: CriticalSection,
        // Handle of the hidden FreeRTOS task; null when not started.
        task: AtomicPtr<crate::sys::tskTaskControlBlock>,
        // Configuration used when spawning the background task.
        task_config: IsrReactorConfig,
    }
377
    impl<const N: usize> IsrReactor<N> {
        /// Create a new `IsrReactor` instance.
        pub const fn new(config: IsrReactorConfig) -> Self {
            Self {
                wakers_cs: IsrCriticalSection::new(),
                wakers: UnsafeCell::new(heapless::Deque::new()),
                task_cs: CriticalSection::new(),
                task: AtomicPtr::new(core::ptr::null_mut()),
                task_config: config,
            }
        }

        /// Returns `true` if the wake runner is started.
        pub fn is_started(&self) -> bool {
            // A non-null task handle means the background task is running.
            !self.task.load(Ordering::SeqCst).is_null()
        }

        /// Starts the wake runner. Returns `false` if it had been already started.
        pub fn start(&'static self) -> Result<bool, EspError> {
            // Serialize start/stop so two callers cannot both spawn the task.
            let _guard = self.task_cs.enter();

            if self.task.load(Ordering::SeqCst).is_null() {
                // SAFETY: `self` is `'static`, so the raw pointer passed as the
                // task context outlives the spawned task.
                let task = unsafe {
                    crate::task::create(
                        Self::task_run,
                        self.task_config.task_name,
                        self.task_config.task_stack_size,
                        self as *const _ as *const c_void as *mut _,
                        self.task_config.task_priority,
                        self.task_config.task_pin_to_core,
                    )?
                };

                self.task.store(task as _, Ordering::SeqCst);

                info!("IsrReactor {:?} started.", self.task_config.task_name);

                Ok(true)
            } else {
                Ok(false)
            }
        }

        /// Stops the wake runner. Returns `false` if it had been already stopped.
        pub fn stop(&self) -> bool {
            let _guard = self.task_cs.enter();

            // Claim the handle atomically so only one caller destroys the task.
            let task = self.task.swap(core::ptr::null_mut(), Ordering::SeqCst);

            if !task.is_null() {
                unsafe {
                    crate::task::destroy(task as _);
                }

                info!("IsrReactor {:?} stopped.", self.task_config.task_name);

                true
            } else {
                false
            }
        }

        /// Schedules a waker to be awoken by the hidden FreeRTOS task running in the background.
        /// If not called from within an ISR context, calls `waker.wake()` directly instead of scheduling the waker.
        /// NOTE: If the wake runner is not started yet, scheduling from an ISR context would fail silently.
        ///
        /// This and only this method is safe to call from an ISR context.
        pub fn schedule(&self, waker: Waker) {
            if super::active() {
                self.wakers(|wakers| {
                    // Deduplicate: if an equivalent waker is already queued,
                    // just refresh it instead of queueing a second entry.
                    let earlier_waker = wakers.iter_mut().find(|a_waker| a_waker.will_wake(&waker));

                    if let Some(earlier_waker) = earlier_waker {
                        *earlier_waker = waker;
                    } else if wakers.push_back(waker).is_err() {
                        panic!("IsrReactor queue overflow");
                    }

                    let task = self.task.load(Ordering::SeqCst);

                    // Nudge the background task so it drains the queue;
                    // silently a no-op when the runner was never started.
                    if !task.is_null() {
                        unsafe {
                            crate::task::notify_and_yield(task as _, NonZeroU32::new(1).unwrap());
                        }
                    }
                })
            } else {
                waker.wake();
            }
        }

        /// Body of the background task: drain the queue, wake everything,
        /// then block until notified again.
        fn run(&self) {
            loop {
                loop {
                    // Pop under the critical section, but wake OUTSIDE it —
                    // `wake()` may run arbitrary executor code.
                    let waker = self.wakers(|wakers| wakers.pop_front());

                    if let Some(waker) = waker {
                        waker.wake();
                    } else {
                        break;
                    }
                }

                crate::task::wait_notification(delay::BLOCK);
            }
        }

        /// Runs `f` with exclusive access to the waker queue.
        fn wakers<F: FnOnce(&mut heapless::Deque<Waker, N>) -> R, R>(&self, f: F) -> R {
            // Always take the critical section, even from ISR context.
            // (An ISR fast-path that skipped the lock was deliberately disabled.)
            let _guard = self.wakers_cs.enter();

            // SAFETY: the critical section guarantees exclusive access to the
            // queue behind the `UnsafeCell` for the duration of `f`.
            let wakers = unsafe { self.wakers.get().as_mut().unwrap() };

            f(wakers)
        }

        /// FreeRTOS task entry point; `ctx` is the `&'static IsrReactor<N>`
        /// passed to `crate::task::create` in `start`.
        extern "C" fn task_run(ctx: *mut c_void) {
            let this =
                unsafe { (ctx as *mut IsrReactor<N> as *const IsrReactor<N>).as_ref() }.unwrap();

            this.run();
        }
    }
506
    // Ensure the background FreeRTOS task is destroyed with the reactor.
    impl<const N: usize> Drop for IsrReactor<N> {
        fn drop(&mut self) {
            self.stop();
        }
    }
512
    // SAFETY: the waker queue behind the `UnsafeCell` is only accessed under
    // `wakers_cs`, and task start/stop is serialized by `task_cs`.
    unsafe impl<const N: usize> Send for IsrReactor<N> {}
    unsafe impl<const N: usize> Sync for IsrReactor<N> {}
515
    /// Single-slot lock-free signaling primitive supporting signalling with a `u32` bit-set.
    ///
    /// A variation of the `Notification` HAL primitive which is however safe to be notified from an ISR context.
    ///
    /// It is useful for sending data between an ISR routine (or a regular task context) and an async task when the
    /// receiver only cares about the latest data, and therefore it's fine to "lose" messages.
    /// This is often the case for "state" updates.
    ///
    /// The sending part of the primitive is non-blocking and ISR-safe, so it can be called from anywhere.
    ///
    /// Similar in spirit to the ESP-IDF FreeRTOS task notifications in that it is light-weight and operates on bit-sets,
    /// but for synchronization between an asynchronous task, and another one, which might be blocking or asynchronous.
    pub struct IsrNotification<const N: usize> {
        // The underlying (non-ISR-safe) notification primitive.
        inner: Notification,
        // Reactor used to defer `wake()` calls out of ISR context.
        reactor: &'static IsrReactor<N>,
    }
532
    impl<const N: usize> IsrNotification<N> {
        /// Creates a new `IsrNotification`.
        /// This method is safe to call from an ISR context, yet such use cases should not normally occur in practice.
        pub const fn new(reactor: &'static IsrReactor<N>) -> Self {
            Self {
                inner: Notification::new(),
                reactor,
            }
        }

        /// Marks the least significant bit (bit 0) in this `IsrNotification` as notified.
        /// This method is safe to call from an ISR context.
        /// Returns `true` if there was a registered waker which got awoken.
        pub fn notify_lsb(&self) -> bool {
            self.notify(NonZeroU32::new(1).unwrap())
        }

        /// Marks the supplied bits in this `IsrNotification` as notified.
        /// This method is safe to call from an ISR context.
        /// Returns `true` if there was a registered waker which got awoken.
        pub fn notify(&self, bits: NonZeroU32) -> bool {
            // Hand the waker to the reactor instead of waking directly, so the
            // actual `wake()` happens outside of ISR context.
            if let Some(waker) = self.inner.notify_waker(bits) {
                self.reactor.schedule(waker);

                true
            } else {
                false
            }
        }

        /// Clears the state of this notification by removing any registered waker and setting all bits to 0.
        /// This method is NOT safe to call from an ISR context.
        pub fn reset(&self) {
            self.inner.reset();
        }

        /// Future that completes when this `IsrNotification` has been notified.
        /// This method is NOT safe to call from an ISR context.
        #[allow(unused)]
        pub fn wait(&self) -> impl Future<Output = NonZeroU32> + '_ {
            // Lazily start the reactor task on first wait.
            self.reactor.start().unwrap();

            self.inner.wait()
        }

        /// Non-blocking method to check whether this notification has been notified.
        /// This method is NOT safe to call from an ISR context.
        pub fn poll_wait(&self, cx: &Context<'_>) -> Poll<NonZeroU32> {
            // Lazily start the reactor task on first poll.
            self.reactor.start().unwrap();

            self.inner.poll_wait(cx)
        }
    }
586
    /// Single-slot lock-free signaling primitive supporting signalling with a `u32` bit-set.
    ///
    /// A variation of the `IsrNotification` HAL primitive which is however safe to be notified from an ISR context.
    /// The difference between this primitive and `IsrNotification` is that this one is hard-wired to the
    /// global HAL wake runner (`HAL_ISR_REACTOR`) and is thus occupying less space.
    ///
    /// It is useful for sending data between an ISR routine (or a regular task context) and an async task when the
    /// receiver only cares about the latest data, and therefore it's fine to "lose" messages.
    /// This is often the case for "state" updates.
    ///
    /// The sending part of the primitive is non-blocking and ISR-safe, so it can be called from anywhere.
    ///
    /// Similar in spirit to the ESP-IDF FreeRTOS task notifications in that it is light-weight and operates on bit-sets,
    /// but for synchronization between an asynchronous task, and another one, which might be blocking or asynchronous.
    pub struct HalIsrNotification {
        // The underlying (non-ISR-safe) notification primitive.
        inner: Notification,
    }
604
    // `Default` simply delegates to the `const` constructor.
    impl Default for HalIsrNotification {
        fn default() -> Self {
            Self::new()
        }
    }
610
    impl HalIsrNotification {
        /// Creates a new `HalIsrNotification`.
        /// This method is safe to call from an ISR context, yet such use cases should not normally occur in practice.
        pub const fn new() -> Self {
            Self {
                inner: Notification::new(),
            }
        }

        /// Marks the least significant bit (bit 0) in this `HalIsrNotification` as notified.
        /// This method is safe to call from an ISR context.
        /// Returns `true` if there was a registered waker which got awoken.
        pub fn notify_lsb(&self) -> bool {
            self.notify(NonZeroU32::new(1).unwrap())
        }

        /// Marks the supplied bits in this `HalIsrNotification` as notified.
        /// This method is safe to call from an ISR context.
        /// Returns `true` if there was a registered waker which got awoken.
        pub fn notify(&self, bits: NonZeroU32) -> bool {
            // Hand the waker to the global reactor so the actual `wake()`
            // happens outside of ISR context.
            if let Some(waker) = self.inner.notify_waker(bits) {
                HAL_ISR_REACTOR.schedule(waker);

                true
            } else {
                false
            }
        }

        /// Clears the state of this notification by removing any registered waker and setting all bits to 0.
        /// This method is NOT safe to call from an ISR context.
        pub fn reset(&self) {
            self.inner.reset();
        }

        /// Future that completes when this `HalIsrNotification` has been notified.
        /// This method is NOT safe to call from an ISR context.
        #[allow(unused)]
        pub fn wait(&self) -> impl Future<Output = NonZeroU32> + '_ {
            // Lazily start the global reactor task on first wait.
            HAL_ISR_REACTOR.start().unwrap();

            self.inner.wait()
        }

        /// Non-blocking method to check whether this notification has been notified.
        /// This method is NOT safe to call from an ISR context.
        pub fn poll_wait(&self, cx: &Context<'_>) -> Poll<NonZeroU32> {
            // Lazily start the global reactor task on first poll.
            HAL_ISR_REACTOR.start().unwrap();

            self.inner.poll_wait(cx)
        }
    }
663}
664
#[cfg(feature = "embassy-sync")]
pub mod embassy_sync {
    use embassy_sync::blocking_mutex::raw::RawMutex;

    /// A mutex that allows borrowing data across executors and interrupts.
    ///
    /// # Safety
    ///
    /// This mutex is safe to share between different executors and interrupts.
    pub struct IsrRawMutex(());

    // SAFETY: the type holds no data; locking delegates to `super::free`,
    // which masks interrupts on the current core for the closure's duration.
    unsafe impl Send for IsrRawMutex {}
    unsafe impl Sync for IsrRawMutex {}

    impl IsrRawMutex {
        /// Create a new `IsrRawMutex`.
        pub const fn new() -> Self {
            Self(())
        }
    }

    // Fix for clippy `new_without_default`: a type with a `pub const fn new()`
    // should also implement `Default` so it composes with derive and generics.
    impl Default for IsrRawMutex {
        fn default() -> Self {
            Self::new()
        }
    }

    unsafe impl RawMutex for IsrRawMutex {
        const INIT: Self = Self::new();

        /// Runs `f` inside an interrupt-free critical section.
        fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
            super::free(f)
        }
    }
}