// esp_idf_hal/task.rs
1use core::cell::Cell;
2use core::future::Future;
3use core::num::NonZeroU32;
4use core::pin::Pin;
5use core::ptr::{self, NonNull};
6use core::sync::atomic::{AtomicBool, Ordering};
7use core::task::{Context, Poll};
8
9#[cfg(feature = "alloc")]
10extern crate alloc;
11
12use esp_idf_sys::*;
13
14use crate::cpu::Core;
15use crate::interrupt;
16
// Core-affinity sentinel passed to FreeRTOS task-creation APIs when the task
// may run on any core. The underlying ESP-IDF constant (and its C type)
// changed across IDF versions, hence the two cfg-gated definitions.
#[cfg(not(any(
    esp_idf_version_major = "4",
    esp_idf_version = "5.0",
    esp_idf_version = "5.1"
)))]
const NO_AFFINITY: core::ffi::c_int = CONFIG_FREERTOS_NO_AFFINITY as _;

// ESP-IDF v4.x, v5.0 and v5.1 expose the sentinel as unsigned `tskNO_AFFINITY`.
#[cfg(any(
    esp_idf_version_major = "4",
    esp_idf_version = "5.0",
    esp_idf_version = "5.1"
))]
const NO_AFFINITY: core::ffi::c_uint = tskNO_AFFINITY;
30
31/// Creates a FreeRTOS task.
32///
33/// This API is to be used only for niche use cases like where the `std` feature is not enabled, or one absolutely
34/// needs to create a raw FreeRTOS task.
35///
36/// In all other cases, the standard, safe Rust `std::thread` API should be utilized, as it is anyway
37/// a thin wrapper around the FreeRTOS task API.
38///
39/// # Safety
40///
41/// Only marked as unsafe for symmetry with `destroy` and to discourage users from leaning on it in favor of `std::thread`.
42/// Otherwise, this function is actually safe.
43pub unsafe fn create(
44    task_handler: extern "C" fn(*mut core::ffi::c_void),
45    task_name: &core::ffi::CStr,
46    stack_size: usize,
47    task_arg: *mut core::ffi::c_void,
48    priority: u8,
49    pin_to_core: Option<Core>,
50) -> Result<TaskHandle_t, EspError> {
51    let mut task: TaskHandle_t = core::ptr::null_mut();
52
53    let created = xTaskCreatePinnedToCore(
54        Some(task_handler),
55        task_name.as_ptr(),
56        stack_size as _,
57        task_arg,
58        priority as _,
59        &mut task,
60        pin_to_core.map(Into::into).unwrap_or(NO_AFFINITY as _),
61    );
62
63    if created == 0 {
64        Err(EspError::from_infallible::<ESP_FAIL>())
65    } else {
66        Ok(task)
67    }
68}
69
/// Deletes a FreeRTOS task.
///
/// This API is to be used only for niche use cases like where the `std` feature is not enabled, or one absolutely
/// needs to create a raw FreeRTOS task.
///
/// In all other cases, the standard, safe Rust `std::thread` API should be utilized, as it is anyway
/// a thin wrapper around the FreeRTOS task API.
///
/// # Safety
///
/// A valid `TaskHandle_t` instance of an existing task should be provided.
/// Providing a `TaskHandle_t` of a task which was already destroyed is an undefined behavior.
pub unsafe fn destroy(task: TaskHandle_t) {
    // `vTaskDelete` has no failure path; passing a dangling handle is UB (see above).
    vTaskDelete(task)
}
85
/// Requests a context switch ("yield") appropriate for the current execution
/// context: task context uses `vPortYield`, while ISR context goes through a
/// chip- and IDF-version-specific "yield from ISR" mechanism.
#[inline(always)]
#[link_section = ".iram1.interrupt_task_do_yield"]
pub fn do_yield() {
    if interrupt::active() {
        unsafe {
            // A registered ISR yielder (if any) takes precedence over the
            // built-in mechanisms below.
            if let Some((yielder, arg)) = interrupt::get_isr_yielder() {
                yielder(arg);
            } else {
                // These chips expose a dedicated ISR yield function.
                #[cfg(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6, esp32p4))]
                vPortYieldFromISR();

                // Remaining chips on ESP-IDF v4: evaluate the yield explicitly.
                #[cfg(all(
                    not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6, esp32p4)),
                    esp_idf_version_major = "4"
                ))]
                vPortEvaluateYieldFromISR(0);

                // Remaining chips on ESP-IDF v5+: arm the switch to happen on
                // interrupt exit.
                // NOTE(review): this chip list additionally excludes esp32c61,
                // unlike the two branches above — confirm whether that is
                // intentional or an omission.
                #[cfg(all(
                    not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6, esp32c61, esp32p4)),
                    not(esp_idf_version_major = "4")
                ))]
                _frxt_setup_switch();
            }
        }
    } else {
        // Task context: plain cooperative yield.
        unsafe {
            vPortYield();
        }
    }
}
116
117#[inline(always)]
118#[link_section = ".iram1.interrupt_task_current"]
119pub fn current() -> Option<TaskHandle_t> {
120    if interrupt::active() {
121        None
122    } else {
123        Some(unsafe { xTaskGetCurrentTaskHandle() })
124    }
125}
126
127pub fn wait_notification(timeout: TickType_t) -> Option<NonZeroU32> {
128    let mut notification = 0_u32;
129
130    let notified =
131        unsafe { xTaskGenericNotifyWait(0, 0, u32::MAX, &mut notification, timeout) } != 0;
132
133    if notified {
134        NonZeroU32::new(notification)
135    } else {
136        None
137    }
138}
139
140/// # Safety
141///
142/// When calling this function care should be taken to pass a valid
143/// FreeRTOS task handle. Moreover, the FreeRTOS task should be valid
144/// when this function is being called.
145pub unsafe fn notify_and_yield(task: TaskHandle_t, notification: NonZeroU32) -> bool {
146    let (notified, higher_prio_task_woken) = notify(task, notification);
147
148    if higher_prio_task_woken {
149        do_yield();
150    }
151
152    notified
153}
154
155/// # Safety
156///
157/// When calling this function care should be taken to pass a valid
158/// FreeRTOS task handle. Moreover, the FreeRTOS task should be valid
159/// when this function is being called.
160pub unsafe fn notify(task: TaskHandle_t, notification: NonZeroU32) -> (bool, bool) {
161    let (notified, higher_prio_task_woken) = if interrupt::active() {
162        let mut higher_prio_task_woken: BaseType_t = Default::default();
163
164        let notified = xTaskGenericNotifyFromISR(
165            task,
166            0,
167            notification.into(),
168            eNotifyAction_eSetBits,
169            ptr::null_mut(),
170            &mut higher_prio_task_woken,
171        );
172
173        (notified, higher_prio_task_woken)
174    } else {
175        let notified = xTaskGenericNotify(
176            task,
177            0,
178            notification.into(),
179            eNotifyAction_eSetBits,
180            ptr::null_mut(),
181        );
182
183        (notified, 0)
184    };
185
186    (notified != 0, higher_prio_task_woken != 0)
187}
188
/// Returns the handle of the idle task for the given `core`.
///
/// On single-core chips, only `Core0` is valid; any other value hits
/// `unreachable!`.
pub fn get_idle_task(core: crate::cpu::Core) -> TaskHandle_t {
    // Single-core chips: there is exactly one idle task.
    #[cfg(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6))]
    {
        if matches!(core, crate::cpu::Core::Core0) {
            unsafe { xTaskGetIdleTaskHandle() }
        } else {
            unreachable!()
        }
    }

    // Other chips, ESP-IDF v4 / 5.0 / 5.1: per-CPU accessor.
    #[cfg(not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6)))]
    #[cfg(any(
        esp_idf_version_major = "4",
        esp_idf_version = "5.0",
        esp_idf_version = "5.1"
    ))]
    unsafe {
        xTaskGetIdleTaskHandleForCPU(core as u32)
    }

    // Other chips, ESP-IDF 5.2+: the accessor was renamed (and takes a signed id).
    #[cfg(not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6)))]
    #[cfg(not(any(
        esp_idf_version_major = "4",
        esp_idf_version = "5.0",
        esp_idf_version = "5.1"
    )))]
    unsafe {
        xTaskGetIdleTaskHandleForCore(core as i32)
    }
}
219
/// Executes the supplied future on the current thread, thus blocking it until the future becomes ready.
///
/// The waker is backed by a FreeRTOS task notification: each `Poll::Pending`
/// puts the task to sleep until the future's waker fires.
#[cfg(feature = "alloc")]
pub fn block_on<F>(fut: F) -> F::Output
where
    F: Future,
{
    ::log::trace!("block_on(): started");

    let notification = notification::Notification::new();
    let waker = notification.notifier().into();
    let mut cx = Context::from_waker(&waker);

    let mut fut = core::pin::pin!(fut);

    loop {
        if let Poll::Ready(res) = fut.as_mut().poll(&mut cx) {
            ::log::trace!("block_on(): finished");
            break res;
        }

        // Sleep until the waker posts a task notification.
        notification.wait_any();
    }
}
247
/// Yield from the current task once, allowing other tasks to run.
///
/// This can be used to easily and quickly implement simple async primitives
/// without using wakers. The following snippet will wait for a condition to
/// hold, while still allowing other tasks to run concurrently (not monopolizing
/// the executor thread).
///
/// ```rust,no_run
/// while !some_condition() {
///     yield_now().await;
/// }
/// ```
///
/// The downside is this will spin in a busy loop, using 100% of the CPU, while
/// using wakers correctly would allow the CPU to sleep while waiting.
///
/// The internal implementation is: on first poll the future wakes itself and
/// returns `Poll::Pending`. On second poll, it returns `Poll::Ready`.
pub fn yield_now() -> impl Future<Output = ()> {
    // Starts in the "not yet yielded" state; see `YieldNowFuture`'s `poll`.
    YieldNowFuture { yielded: false }
}
269
/// Future returned by [`yield_now`]: pending on the first poll, ready on the second.
#[must_use = "futures do nothing unless you `.await` or poll them"]
struct YieldNowFuture {
    // Set once the first poll has self-woken and returned `Poll::Pending`.
    yielded: bool,
}
274
275impl Future for YieldNowFuture {
276    type Output = ();
277    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
278        if self.yielded {
279            Poll::Ready(())
280        } else {
281            self.yielded = true;
282            cx.waker().wake_by_ref();
283            Poll::Pending
284        }
285    }
286}
287
#[cfg(esp_idf_comp_pthread_enabled)]
pub mod thread {
    //! Configuration of the ESP-IDF pthread adapter, which backs `std::thread`
    //! spawning (name, stack size, priority, core affinity and — on newer
    //! IDF versions — stack allocation capabilities).

    use core::ffi::CStr;

    use enumset::EnumSetType;

    #[cfg(not(any(
        esp_idf_version_major = "4",
        all(esp_idf_version_major = "5", esp_idf_version_minor = "0"),
        all(esp_idf_version_major = "5", esp_idf_version_minor = "1"),
        all(esp_idf_version_major = "5", esp_idf_version_minor = "2"),
    )))] // ESP-IDF 5.3 and later
    use enumset::EnumSet;

    use esp_idf_sys::*;

    use super::NO_AFFINITY;

    use crate::cpu::Core;

    /// Flags to indicate the capabilities of the various memory regions
    ///
    /// Used together with EnumSet, e.g.
    /// `let flags = MallocCap::Default | MallocCap::Cap8bit`
    #[derive(Debug, EnumSetType)]
    #[enumset(repr = "u32")] // Note: following value variants represent the bitposition **not** a literal u32 value in an EnumSet<MallocCap>
    pub enum MallocCap {
        // Memory must be able to run executable code
        Exec = 0,
        // Memory must allow for aligned 32-bit data accesses
        Cap32bit = 1,
        // Memory must allow for 8/16/...-bit data accesses
        Cap8bit = 2,
        // Memory must be able to accessed by DMA
        Dma = 3,
        // Memory must be mapped to PID2 memory space (PIDs are not currently used)
        Pid2 = 4,
        // Memory must be mapped to PID3 memory space (PIDs are not currently used)
        Pid3 = 5,
        // Memory must be mapped to PID4 memory space (PIDs are not currently used)
        Pid4 = 6,
        // Memory must be mapped to PID5 memory space (PIDs are not currently used)
        Pid5 = 7,
        // Memory must be mapped to PID6 memory space (PIDs are not currently used)
        Pid6 = 8,
        // Memory must be mapped to PID7 memory space (PIDs are not currently used)
        Pid7 = 9,
        // Memory must be in SPI RAM
        Spiram = 10,
        // Memory must be internal; specifically it should not disappear when flash/spiram cache is switched off
        Internal = 11,
        // Memory can be returned in a non-capability-specific memory allocation (e.g. malloc(), calloc()) call
        Default = 12,
        // Memory must be in IRAM and allow unaligned access
        Iram8bit = 13,
        // Memory must be able to accessed by retention DMA
        Retention = 14,
        // Memory must be in RTC fast memory
        Rtcram = 15,
        // Memory must be in TCM memory
        Tcm = 16,
        // Memory can't be used / list end marker
        Invalid = 31,
    }

    /// Spawn-time configuration applied to subsequently created threads.
    #[derive(Debug)]
    pub struct ThreadSpawnConfiguration {
        pub name: Option<&'static CStr>,
        pub stack_size: usize,
        pub priority: u8,
        pub inherit: bool,
        pub pin_to_core: Option<Core>,
        #[cfg(not(any(
            esp_idf_version_major = "4",
            all(esp_idf_version_major = "5", esp_idf_version_minor = "0"),
            all(esp_idf_version_major = "5", esp_idf_version_minor = "1"),
            all(esp_idf_version_major = "5", esp_idf_version_minor = "2"),
        )))] // ESP-IDF 5.3 and later
        pub stack_alloc_caps: EnumSet<MallocCap>,
    }

    impl ThreadSpawnConfiguration {
        /// Returns the currently active pthread configuration, or `None` if
        /// none has been set yet.
        pub fn get() -> Option<Self> {
            get_conf()
        }

        /// Installs this configuration for threads spawned afterwards.
        ///
        /// Panics if `priority` is outside the valid FreeRTOS range
        /// (see `set_conf`).
        pub fn set(&self) -> Result<(), EspError> {
            set_conf(self)
        }
    }

    impl Default for ThreadSpawnConfiguration {
        fn default() -> Self {
            get_default_conf()
        }
    }

    impl From<&ThreadSpawnConfiguration> for esp_pthread_cfg_t {
        fn from(conf: &ThreadSpawnConfiguration) -> Self {
            #[allow(clippy::unwrap_or_default)]
            Self {
                thread_name: conf
                    .name
                    .map(|name| name.as_ptr() as _)
                    .unwrap_or(core::ptr::null()),
                stack_size: conf.stack_size as _,
                prio: conf.priority as _,
                inherit_cfg: conf.inherit,
                pin_to_core: conf.pin_to_core.map(Into::into).unwrap_or(NO_AFFINITY as _),
                #[cfg(not(any(
                    esp_idf_version_major = "4",
                    esp_idf_version = "5.0",
                    esp_idf_version = "5.1",
                    esp_idf_version = "5.2",
                )))] // ESP-IDF 5.3 and later
                stack_alloc_caps: conf.stack_alloc_caps.as_u32(),
            }
        }
    }

    impl From<esp_pthread_cfg_t> for ThreadSpawnConfiguration {
        fn from(conf: esp_pthread_cfg_t) -> Self {
            Self {
                name: if conf.thread_name.is_null() {
                    None
                } else {
                    Some(unsafe { CStr::from_ptr(conf.thread_name) })
                },
                stack_size: conf.stack_size as _,
                priority: conf.prio as _,
                inherit: conf.inherit_cfg,
                // The C struct encodes "any core" with the NO_AFFINITY sentinel.
                pin_to_core: if conf.pin_to_core == NO_AFFINITY as _ {
                    None
                } else {
                    Some(conf.pin_to_core.into())
                },
                #[cfg(not(any(
                    esp_idf_version_major = "4",
                    all(esp_idf_version_major = "5", esp_idf_version_minor = "0"),
                    all(esp_idf_version_major = "5", esp_idf_version_minor = "1"),
                    all(esp_idf_version_major = "5", esp_idf_version_minor = "2"),
                )))] // ESP-IDF 5.3 and later
                stack_alloc_caps: EnumSet::<MallocCap>::from_u32(conf.stack_alloc_caps),
            }
        }
    }

    fn get_default_conf() -> ThreadSpawnConfiguration {
        unsafe { esp_pthread_get_default_config() }.into()
    }

    fn get_conf() -> Option<ThreadSpawnConfiguration> {
        let mut conf: esp_pthread_cfg_t = Default::default();

        let res = unsafe { esp_pthread_get_cfg(&mut conf as _) };

        if res == ESP_ERR_NOT_FOUND {
            None
        } else {
            Some(conf.into())
        }
    }

    fn set_conf(conf: &ThreadSpawnConfiguration) -> Result<(), EspError> {
        // Valid priorities are 1..configMAX_PRIORITIES. The message reports the
        // actual configured maximum instead of a hard-coded "24", which was
        // wrong whenever CONFIG_FREERTOS_MAX_TASK_PRIORITIES differs from 25.
        if conf.priority < 1 || conf.priority as u32 >= configMAX_PRIORITIES {
            panic!(
                "Thread priority {} has to be in the range [1 - {}]",
                conf.priority,
                configMAX_PRIORITIES - 1
            );
        }

        esp!(unsafe { esp_pthread_set_cfg(&conf.into()) })?;

        Ok(())
    }
}
460
/// A re-entrant (recursive) lock backed by a lazily-created FreeRTOS
/// recursive-mutex queue.
///
/// Field 0 holds the queue handle once created; field 1 flags that
/// initialization has completed.
pub struct CriticalSection(Cell<Option<NonNull<QueueDefinition>>>, AtomicBool);

// Queue type id of a recursive mutex; not available in the esp-idf-sys bindings.
const QUEUE_TYPE_RECURSIVE_MUTEX: u8 = 4;
465
/// Takes the critical section's recursive mutex, creating it on first use.
#[inline(always)]
#[link_section = ".iram1.cs_enter"]
fn enter(cs: &CriticalSection) {
    // Lazy one-time initialization: the flag is double-checked inside
    // `interrupt::free` so exactly one caller performs the creation.
    if !cs.1.load(Ordering::SeqCst) {
        interrupt::free(|| {
            if !cs.1.load(Ordering::SeqCst) {
                let ptr = unsafe { xQueueCreateMutex(QUEUE_TYPE_RECURSIVE_MUTEX) };
                cs.0.set(NonNull::new(ptr));
                cs.1.store(true, Ordering::SeqCst);
            }
        });
    }

    // Block indefinitely until the mutex is taken; with `BLOCK` the take
    // cannot time out, so a zero return indicates a broken invariant.
    let res =
        unsafe { xQueueTakeMutexRecursive(cs.0.get().unwrap().as_ptr(), crate::delay::BLOCK) } != 0;

    if !res {
        unreachable!();
    }
}
486
487#[inline(always)]
488#[link_section = ".iram1.cs_exit"]
489fn exit(cs: &CriticalSection) {
490    if !cs.1.load(Ordering::SeqCst) {
491        panic!("Called exit() without matching enter()");
492    }
493
494    let res = unsafe { xQueueGiveMutexRecursive(cs.0.get().unwrap().as_ptr()) } != 0;
495
496    if !res {
497        unreachable!();
498    }
499}
500
impl CriticalSection {
    /// Constructs a new `CriticalSection` instance
    ///
    /// The underlying FreeRTOS mutex is created lazily on first [`enter`](Self::enter).
    #[inline(always)]
    pub const fn new() -> Self {
        Self(Cell::new(None), AtomicBool::new(false))
    }

    /// Locks the critical section, returning a guard that unlocks it on drop.
    #[inline(always)]
    pub fn enter(&self) -> CriticalSectionGuard<'_> {
        enter(self);

        CriticalSectionGuard(self)
    }
}
515
516impl Drop for CriticalSection {
517    fn drop(&mut self) {
518        if self.1.load(Ordering::SeqCst) {
519            unsafe {
520                vQueueDelete(self.0.get().unwrap().as_ptr());
521            }
522        }
523    }
524}
525
impl Default for CriticalSection {
    /// Equivalent to [`CriticalSection::new`].
    #[inline(always)]
    fn default() -> Self {
        Self::new()
    }
}
532
// SAFETY: the handle cell is only written once, inside `interrupt::free` during
// lazy initialization in `enter`; afterwards access is read-only, and the
// FreeRTOS recursive mutex itself mediates cross-task use.
unsafe impl Send for CriticalSection {}
unsafe impl Sync for CriticalSection {}

/// RAII guard returned by [`CriticalSection::enter`]; releases the lock on drop.
pub struct CriticalSectionGuard<'a>(&'a CriticalSection);

impl Drop for CriticalSectionGuard<'_> {
    #[inline(always)]
    fn drop(&mut self) {
        exit(self.0);
    }
}
544
pub mod watchdog {
    //! Task Watchdog Timer (TWDT) driver.
    //!
    //! ## Example
    //!
    //! ```rust, ignore
    //! # fn main() -> Result<()> {
    //! let peripherals = Peripherals::take().unwrap();
    //!
    //! let config = TWDTConfig {
    //!     duration: Duration::from_secs(2),
    //!     panic_on_trigger: true,
    //!     subscribed_idle_tasks: enum_set!(Core::Core0)
    //! };
    //! let mut driver = esp_idf_hal::task::watchdog::TWDTDriver::new(
    //!     peripherals.twdt,
    //!     &config,
    //! )?;
    //!
    //! let mut watchdog = driver.watch_current_task()?;
    //!
    //! loop {
    //!     watchdog.feed();
    //!     unsafe { vTaskDelay(1) };
    //! }
    //! # }
    //! ```

    use core::{
        marker::PhantomData,
        sync::atomic::{AtomicUsize, Ordering},
    };

    use esp_idf_sys::*;

    pub type TWDTConfig = config::Config;

    pub mod config {

        #[cfg(not(esp_idf_version_major = "4"))]
        use esp_idf_sys::*;

        /// TWDT configuration: timeout, panic-on-trigger behavior, and which
        /// idle tasks to subscribe.
        #[derive(Clone)]
        pub struct Config {
            pub duration: core::time::Duration,
            pub panic_on_trigger: bool,
            pub subscribed_idle_tasks: enumset::EnumSet<crate::cpu::Core>,
        }

        impl Config {
            // Could be const if enumset operations are const
            /// Builds a configuration mirroring the sdkconfig defaults
            /// (`CONFIG_ESP_TASK_WDT_*` settings).
            pub fn new() -> Self {
                #[cfg(esp_idf_esp_task_wdt)]
                let duration = core::time::Duration::from_secs(
                    esp_idf_sys::CONFIG_ESP_TASK_WDT_TIMEOUT_S as u64,
                );
                // Fallback timeout when the TWDT is disabled in sdkconfig.
                #[cfg(not(esp_idf_esp_task_wdt))]
                let duration = core::time::Duration::from_secs(5);
                Self {
                    duration,
                    panic_on_trigger: cfg!(esp_idf_esp_task_wdt_panic),
                    subscribed_idle_tasks: {
                        let mut subscribed_idle_tasks = enumset::EnumSet::empty();
                        if cfg!(esp_idf_esp_task_wdt_check_idle_task_cpu0) {
                            subscribed_idle_tasks |= crate::cpu::Core::Core0;
                        }
                        // Core1 exists only on the dual-core chips.
                        #[cfg(any(esp32, esp32s3))]
                        if cfg!(esp_idf_esp_task_wdt_check_idle_task_cpu1) {
                            subscribed_idle_tasks |= crate::cpu::Core::Core1;
                        }
                        subscribed_idle_tasks
                    },
                }
            }
        }

        impl Default for Config {
            fn default() -> Self {
                Self::new()
            }
        }

        #[cfg(not(esp_idf_version_major = "4"))]
        impl From<&Config> for esp_task_wdt_config_t {
            fn from(config: &Config) -> Self {
                esp_task_wdt_config_t {
                    timeout_ms: config.duration.as_millis() as u32,
                    trigger_panic: config.panic_on_trigger,
                    idle_core_mask: config.subscribed_idle_tasks.as_u32(),
                }
            }
        }
    }

    /// Reference-counted TWDT driver: cloning bumps a global count, and the
    /// TWDT is deinitialized only when the last clone drops (and only if it
    /// was initialized by this driver rather than by ESP-IDF itself).
    pub struct TWDTDriver<'d> {
        // True if ESP-IDF had already initialized the TWDT before this driver
        // was created; in that case we reconfigure but never deinitialize.
        init_by_idf: bool,
        _marker: PhantomData<&'d mut ()>,
    }

    // Count of live `TWDTDriver` clones; guards deinitialization in `drop`.
    static TWDT_DRIVER_REF_COUNT: AtomicUsize = AtomicUsize::new(0);

    impl<'d> TWDTDriver<'d> {
        /// Initializes (or, if already initialized by ESP-IDF, reconfigures)
        /// the TWDT according to `config`.
        pub fn new(_twdt: TWDT<'d>, config: &config::Config) -> Result<Self, EspError> {
            TWDT_DRIVER_REF_COUNT.fetch_add(1, Ordering::SeqCst);
            let init_by_idf = Self::watchdog_is_init_by_idf();

            #[cfg(not(esp_idf_version_major = "4"))]
            if !init_by_idf {
                esp!(unsafe { esp_task_wdt_init(&config.into() as *const esp_task_wdt_config_t) })?;
            } else {
                esp!(unsafe {
                    esp_task_wdt_reconfigure(&config.into() as *const esp_task_wdt_config_t)
                })?;
            }

            #[cfg(esp_idf_version_major = "4")]
            esp!(unsafe {
                esp_task_wdt_init(config.duration.as_secs() as u32, config.panic_on_trigger)
            })?;

            #[cfg(esp_idf_version_major = "4")]
            if let Err(e) = Self::subscribe_idle_tasks(config.subscribed_idle_tasks) {
                // error task already subscribed could occur but it's ok (not checking if tasks already subscribed before)
                if e.code() != ESP_ERR_INVALID_ARG {
                    return Err(e);
                }
            }

            Ok(Self {
                init_by_idf,
                _marker: Default::default(),
            })
        }

        /// Subscribes the calling task to the TWDT; the returned subscription
        /// must then be fed within the configured timeout.
        pub fn watch_current_task(&mut self) -> Result<WatchdogSubscription<'_>, EspError> {
            esp!(unsafe { esp_task_wdt_add(core::ptr::null_mut()) })?;
            Ok(WatchdogSubscription::new())
        }

        #[cfg(esp_idf_version_major = "4")]
        fn subscribe_idle_tasks(cores: enumset::EnumSet<crate::cpu::Core>) -> Result<(), EspError> {
            for core in cores {
                let task = super::get_idle_task(core);
                esp!(unsafe { esp_task_wdt_add(task) })?;
            }

            Ok(())
        }

        #[cfg(esp_idf_version_major = "4")]
        fn unsubscribe_idle_tasks() -> Result<(), EspError> {
            for core in enumset::EnumSet::<crate::cpu::Core>::all() {
                let task = super::get_idle_task(core);
                esp!(unsafe { esp_task_wdt_delete(task) })?;
            }

            Ok(())
        }

        // On ESP-IDF 5.1+ this is decided by a compile-time sdkconfig flag; on
        // v4 / 5.0 it is probed at runtime via `esp_task_wdt_status`.
        fn watchdog_is_init_by_idf() -> bool {
            if cfg!(not(any(
                esp_idf_version_major = "4",
                esp_idf_version = "5.0"
            ))) {
                cfg!(esp_idf_esp_task_wdt_init)
            } else {
                !matches!(
                    unsafe { esp_task_wdt_status(core::ptr::null_mut()) },
                    ESP_ERR_INVALID_STATE
                )
            }
        }

        fn deinit(&self) -> Result<(), EspError> {
            if !self.init_by_idf {
                #[cfg(esp_idf_version_major = "4")]
                if let Err(e) = Self::unsubscribe_idle_tasks() {
                    // error task not subscribed could occur but it's ok (not checking if tasks subscribed before)
                    if e.code() != ESP_ERR_INVALID_ARG {
                        return Err(e);
                    }
                }
                esp!(unsafe { esp_task_wdt_deinit() }).unwrap();
            }

            Ok(())
        }
    }

    impl Clone for TWDTDriver<'_> {
        fn clone(&self) -> Self {
            TWDT_DRIVER_REF_COUNT.fetch_add(1, Ordering::SeqCst);
            Self {
                init_by_idf: self.init_by_idf,
                _marker: Default::default(),
            }
        }
    }

    impl Drop for TWDTDriver<'_> {
        fn drop(&mut self) {
            // `fetch_sub` returns the previous value: 1 means this was the
            // last live clone, so the TWDT may be deinitialized.
            let refcnt = TWDT_DRIVER_REF_COUNT.fetch_sub(1, Ordering::SeqCst);
            match refcnt {
                1 => self.deinit().unwrap(),
                r if r < 1 => unreachable!(), // Bug, should never happen
                _ => (),
            }
        }
    }

    unsafe impl Send for TWDTDriver<'_> {}

    /// Subscription of the current task to the TWDT; unsubscribes on drop.
    pub struct WatchdogSubscription<'s>(PhantomData<&'s mut ()>);

    impl WatchdogSubscription<'_> {
        fn new() -> Self {
            Self(Default::default())
        }

        /// Feeds (resets) the watchdog on behalf of the current task.
        pub fn feed(&mut self) -> Result<(), EspError> {
            esp!(unsafe { esp_task_wdt_reset() })
        }
    }

    impl embedded_hal_0_2::watchdog::Watchdog for WatchdogSubscription<'_> {
        fn feed(&mut self) {
            Self::feed(self).unwrap()
        }
    }

    impl Drop for WatchdogSubscription<'_> {
        fn drop(&mut self) {
            esp!(unsafe { esp_task_wdt_delete(core::ptr::null_mut()) }).unwrap();
        }
    }

    crate::impl_peripheral!(TWDT);
}
781
#[cfg(feature = "critical-section")]
pub mod critical_section {
    //! Implementation of the `critical-section` crate's `Impl` trait on top of
    //! [`super::CriticalSection`] (a FreeRTOS recursive mutex).

    // Single process-wide critical section shared by all `critical-section` users.
    static CS: super::CriticalSection = super::CriticalSection::new();

    pub struct EspCriticalSection {}

    unsafe impl critical_section::Impl for EspCriticalSection {
        unsafe fn acquire() {
            super::enter(&CS);
        }

        unsafe fn release(_token: ()) {
            super::exit(&CS);
        }
    }

    pub type LinkWorkaround = [*mut (); 2];

    // Keeps the generated acquire/release symbols referenced so the linker
    // does not discard them; exposed via `link()`.
    static mut __INTERNAL_REFERENCE: LinkWorkaround = [
        _critical_section_1_0_acquire as *mut _,
        _critical_section_1_0_release as *mut _,
    ];

    // Call from user code to force the workaround statics to be linked in.
    pub fn link() -> LinkWorkaround {
        unsafe { __INTERNAL_REFERENCE }
    }

    critical_section::set_impl!(EspCriticalSection);
}
811
pub mod embassy_sync {
    use embassy_sync::blocking_mutex::raw::RawMutex;

    /// A mutex that allows borrowing data across executors but NOT across interrupts.
    ///
    /// # Safety
    ///
    /// This mutex is safe to share between different executors.
    pub struct EspRawMutex(super::CriticalSection);

    unsafe impl Send for EspRawMutex {}
    unsafe impl Sync for EspRawMutex {}

    impl Default for EspRawMutex {
        fn default() -> Self {
            Self::new()
        }
    }

    impl EspRawMutex {
        /// Create a new `EspRawMutex`.
        pub const fn new() -> Self {
            Self(super::CriticalSection::new())
        }
    }

    unsafe impl RawMutex for EspRawMutex {
        #[allow(clippy::declare_interior_mutable_const)]
        const INIT: Self = Self::new();

        /// Runs `f` while holding the underlying critical section.
        fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
            let _guard = self.0.enter();

            f()
        }
    }
}
849
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
pub mod notification {
    //! A `Waker`-compatible bridge over FreeRTOS task notifications.

    use core::marker::PhantomData;
    use core::num::NonZeroU32;
    use core::sync::atomic::{AtomicPtr, Ordering};

    extern crate alloc;
    use alloc::sync::Arc;
    use alloc::task::Wake;

    use esp_idf_sys::TickType_t;

    use crate::task;

    #[cfg(esp_idf_version_major = "4")]
    type Task = core::ffi::c_void;

    #[cfg(not(esp_idf_version_major = "4"))]
    type Task = esp_idf_sys::tskTaskControlBlock;

    /// Binds the task it was created on to [`Notifier`]s that can wake it.
    ///
    /// The `PhantomData<*const ()>` makes this type neither `Send` nor `Sync`,
    /// so waiting can only happen on the task that created it.
    pub struct Notification(Arc<Notifier>, PhantomData<*const ()>);

    impl Notification {
        /// Creates a notification bound to the current task.
        ///
        /// Panics when called from an ISR, where `task::current()` is `None`.
        pub fn new() -> Self {
            Self(
                Arc::new(Notifier(AtomicPtr::new(task::current().unwrap()))),
                PhantomData,
            )
        }

        /// Returns a cloneable handle that can notify (wake) this task.
        pub fn notifier(&self) -> Arc<Notifier> {
            self.0.clone()
        }

        /// Blocks until any (non-zero) notification value arrives.
        pub fn wait_any(&self) {
            loop {
                if task::wait_notification(crate::delay::BLOCK).is_some() {
                    break;
                }
            }
        }

        /// Waits up to `timeout` ticks for a notification value.
        pub fn wait(&self, timeout: TickType_t) -> Option<NonZeroU32> {
            task::wait_notification(timeout)
        }
    }

    impl Default for Notification {
        fn default() -> Self {
            Self::new()
        }
    }

    /// Holds the raw handle of the task to notify.
    pub struct Notifier(AtomicPtr<Task>);

    impl Notifier {
        /// # Safety
        ///
        /// Care should be taken to ensure that `Notifier` does not outlive the task
        /// in which the `Notification` that produced it was created.
        ///
        /// If that happens, a dangling pointer instead of proper task handle will be passed to `task::notify`,
        /// which will result in memory corruption.
        pub unsafe fn notify(&self, notification: NonZeroU32) -> (bool, bool) {
            let freertos_task = self.0.load(Ordering::SeqCst);

            if !freertos_task.is_null() {
                return unsafe { task::notify(freertos_task, notification) };
            }

            (false, false)
        }

        /// # Safety
        ///
        /// Care should be taken to ensure that `Notifier` does not outlive the task
        /// in which the `Notification` that produced it was created.
        ///
        /// If that happens, a dangling pointer instead of proper task handle will be passed to `task::notify_and_yield`,
        /// which will result in memory corruption.
        pub unsafe fn notify_and_yield(&self, notification: NonZeroU32) -> bool {
            let freertos_task = self.0.load(Ordering::SeqCst);

            if !freertos_task.is_null() {
                unsafe { task::notify_and_yield(freertos_task, notification) }
            } else {
                false
            }
        }
    }

    impl Wake for Notifier {
        // Waking posts the value 1 as a task notification and yields if a
        // higher-priority task was woken.
        fn wake(self: Arc<Self>) {
            unsafe {
                self.notify_and_yield(NonZeroU32::new(1).unwrap());
            }
        }
    }
}
949
950pub mod queue {
951    use core::{
952        marker::PhantomData,
953        mem::{size_of, MaybeUninit},
954    };
955
956    use esp_idf_sys::{EspError, TickType_t, ESP_FAIL};
957
958    use crate::sys;
959
    /// Thin wrapper on top of the FreeRTOS queue.
    ///
    /// This may be preferable over a Rust channel
    /// in cases where an ISR needs to send or receive
    /// data as it is safe to use in ISR contexts.
    pub struct Queue<T> {
        // Raw FreeRTOS queue handle all operations are forwarded to.
        ptr: sys::QueueHandle_t,
        // `true` when this wrapper created the queue (via `new`) and must
        // delete it on `Drop`; `false` for handles borrowed via `new_borrowed`.
        is_owned: bool,
        // Marks the element type `T` without storing any `T` values.
        _marker: PhantomData<T>,
    }
970
    // SAFETY: all queue operations go through the FreeRTOS queue API, which the
    // surrounding docs treat as usable from multiple tasks and ISR contexts;
    // elements are copied by value across contexts (`T: Copy` on the impl), so the
    // wrapper is `Send`/`Sync` exactly when `T` itself is `Send + Sync`.
    unsafe impl<T> Send for Queue<T> where T: Send + Sync {}
    unsafe impl<T> Sync for Queue<T> where T: Send + Sync {}
973
974    impl<T> Queue<T>
975    where
976        // ensures the contained elements are not `Drop`
977        // might be able to lift restriction in the future
978        T: Copy,
979    {
980        /// Allocate a new queue on the heap.
981        pub fn new(size: usize) -> Self {
982            Queue {
983                ptr: unsafe { sys::xQueueGenericCreate(size as u32, size_of::<T>() as u32, 0) },
984                is_owned: true,
985                _marker: PhantomData,
986            }
987        }
988
989        /// Create a new queue which is not deleted on `Drop`, but owned by somebody else.
990        ///
991        /// # Safety
992        ///
993        /// Care must be taken that the queue is valid for the constructed
994        /// lifetime.
995        pub unsafe fn new_borrowed(ptr: sys::QueueHandle_t) -> Self {
996            assert!(!ptr.is_null());
997
998            Queue {
999                ptr,
1000                is_owned: false,
1001                _marker: PhantomData,
1002            }
1003        }
1004
1005        /// Retrieves the underlying FreeRTOS handle.
1006        #[inline]
1007        pub fn as_raw(&self) -> sys::QueueHandle_t {
1008            self.ptr
1009        }
1010
1011        /// Copy item to back of queue, blocking for `timeout` ticks if full.
1012        ///
1013        /// # ISR safety
1014        ///
1015        /// This function is safe to call in ISR contexts.
1016        ///
1017        /// # Parameters
1018        ///
1019        /// * `item` the item to push onto the back of the queue
1020        /// * `timeout` specifies how long to block. Ignored in ISR context.
1021        ///
1022        /// # Returns
1023        ///
1024        /// Will return an error if queue is full.
1025        /// If this function is executed in an ISR context,
1026        /// it will return true if a higher priority task was awoken.
1027        /// In non-ISR contexts, the function will always return `false`.
1028        /// In this case the interrupt should call [`crate::task::do_yield`].
1029        #[inline]
1030        pub fn send_back(&self, item: T, timeout: TickType_t) -> Result<bool, EspError> {
1031            self.send_generic(item, timeout, 0)
1032        }
1033
1034        /// Copy item to front of queue, blocking for `timeout` ticks if full.
1035        /// This can be used for hight priority messages which should be processed
1036        /// sooner.
1037        ///
1038        /// # ISR safety
1039        ///
1040        /// This function is safe to call in ISR contexts.
1041        ///
1042        /// # Parameters
1043        ///
1044        /// * `item` the item to push to front of the queue
1045        /// * `timeout` specifies how long to block. Ignored in ISR context.
1046        ///
1047        /// # Returns
1048        ///
1049        /// Will return an error if queue is full.
1050        /// If this function is executed in an ISR context,
1051        /// it will return true if a higher priority task was awoken.
1052        /// In non-ISR contexts, the function will always return `false`.
1053        /// In this case the interrupt should call [`crate::task::do_yield`].
1054        #[inline]
1055        pub fn send_front(&self, item: T, timeout: TickType_t) -> Result<bool, EspError> {
1056            self.send_generic(item, timeout, 1)
1057        }
1058
1059        /// Copy item to queue, blocking for `timeout` ticks if full.
1060        ///
1061        /// # ISR safety
1062        ///
1063        /// This function is safe to call in ISR contexts.
1064        ///
1065        /// # Parameters
1066        ///
1067        /// * `item` the item to push to the queue
1068        /// * `timeout` specifies how long to block. Ignored in ISR context.
1069        /// * `copy_position` 0 to push onto back, 1 to push to front
1070        ///
1071        /// # Returns
1072        ///
1073        /// Will return an error if queue is full.
1074        /// If this function is executed in an ISR context,
1075        /// it will return true if a higher priority task was awoken.
1076        /// In non-ISR contexts, the function will always return `false`.
1077        /// In this case the interrupt should call [`crate::task::do_yield`].
1078        #[inline]
1079        fn send_generic(
1080            &self,
1081            item: T,
1082            timeout: TickType_t,
1083            copy_position: i32,
1084        ) -> Result<bool, EspError> {
1085            let mut hp_task_awoken: i32 = false as i32;
1086            let success = unsafe {
1087                if crate::interrupt::active() {
1088                    sys::xQueueGenericSendFromISR(
1089                        self.ptr,
1090                        &item as *const T as *const _,
1091                        &mut hp_task_awoken,
1092                        copy_position,
1093                    )
1094                } else {
1095                    sys::xQueueGenericSend(
1096                        self.ptr,
1097                        &item as *const T as *const _,
1098                        timeout,
1099                        copy_position,
1100                    )
1101                }
1102            };
1103            let success = success == 1;
1104            let hp_task_awoken = hp_task_awoken == 1;
1105
1106            match success {
1107                true => Ok(hp_task_awoken),
1108                false => Err(EspError::from_infallible::<ESP_FAIL>()),
1109            }
1110        }
1111
1112        /// Receive a message from the queue and remove it.
1113        ///
1114        /// # ISR safety
1115        ///
1116        /// This function is safe to use in ISR contexts
1117        ///
1118        /// # Parameters
1119        ///
1120        /// * `timeout` specifies how long to block. Ignored in ISR contexts.
1121        ///
1122        /// # Returns
1123        ///
1124        /// * `None` if no message could be received in time
1125        /// * `Some((message, higher_priority_task_awoken))` otherwise
1126        ///
1127        /// The boolean is used for ISRs and indicates if a higher priority task was awoken.
1128        /// In this case the interrupt should call [`crate::task::do_yield`].
1129        /// In non-ISR contexts, the function will always return `false`.
1130        #[inline]
1131        pub fn recv_front(&self, timeout: TickType_t) -> Option<(T, bool)> {
1132            let mut buf = MaybeUninit::uninit();
1133            let mut hp_task_awoken = false as i32;
1134
1135            unsafe {
1136                let success = if crate::interrupt::active() {
1137                    sys::xQueueReceiveFromISR(
1138                        self.ptr,
1139                        buf.as_mut_ptr() as *mut _,
1140                        &mut hp_task_awoken,
1141                    )
1142                } else {
1143                    sys::xQueueReceive(self.ptr, buf.as_mut_ptr() as *mut _, timeout)
1144                };
1145                if success == 1 {
1146                    Some((buf.assume_init(), hp_task_awoken == 1))
1147                } else {
1148                    None
1149                }
1150            }
1151        }
1152
1153        /// Copy the first message from the queue without removing it.
1154        ///
1155        /// # ISR safety
1156        ///
1157        /// This function is safe to use in ISR contexts
1158        ///
1159        /// # Parameters
1160        ///
1161        /// * `timeout` specifies how long to block. Ignored in ISR contexts.
1162        ///
1163        /// # Returns
1164        ///
1165        /// * `None` if no message could be received in time
1166        /// * `Some(message)` otherwise
1167        ///
1168        /// This function does not return a boolean to indicate if
1169        /// a higher priority task was awoken since we don't free
1170        /// up space in the queue and thus cannot unblock anyone.
1171        #[inline]
1172        pub fn peek_front(&self, timeout: TickType_t) -> Option<T> {
1173            let mut buf = MaybeUninit::uninit();
1174
1175            unsafe {
1176                let success = if crate::interrupt::active() {
1177                    sys::xQueuePeekFromISR(self.ptr, buf.as_mut_ptr() as *mut _)
1178                } else {
1179                    sys::xQueuePeek(self.ptr, buf.as_mut_ptr() as *mut _, timeout)
1180                };
1181                if success == 1 {
1182                    Some(buf.assume_init())
1183                } else {
1184                    None
1185                }
1186            }
1187        }
1188    }
1189
1190    impl<T> Drop for Queue<T> {
1191        fn drop(&mut self) {
1192            if self.is_owned {
1193                unsafe { sys::vQueueDelete(self.ptr) }
1194            }
1195        }
1196    }
1197}
1198
1199pub mod asynch {
1200    use core::future::Future;
1201    use core::num::NonZeroU32;
1202    use core::sync::atomic::{AtomicU32, Ordering};
1203    use core::task::{Context, Poll, Waker};
1204
1205    use atomic_waker::AtomicWaker;
1206
    /// Single-slot lock-free signaling primitive supporting signalling with a `u32` bit-set.
    ///
    /// It is useful for sending data between tasks when the receiver only cares about
    /// the latest data, and therefore it's fine to "lose" messages. This is often the case for "state"
    /// updates.
    ///
    /// The sending part of the primitive is non-blocking, so it is also useful for notifying asynchronous tasks
    /// from contexts where blocking or async wait is not possible.
    ///
    /// Similar in spirit to the ESP-IDF FreeRTOS task notifications in that it is light-weight and operates on bit-sets,
    /// but for synchronization between an asynchronous task, and another one, which might be blocking or asynchronous.
    pub struct Notification {
        // Waker of the task currently awaiting this notification, if any.
        waker: AtomicWaker,
        // Accumulated notification bits; consumed (swapped to 0) by `poll_wait`.
        notified: AtomicU32,
    }
1222
1223    impl Default for Notification {
1224        fn default() -> Self {
1225            Self::new()
1226        }
1227    }
1228
    impl Notification {
        /// Creates a new `Notification`.
        pub const fn new() -> Self {
            Self {
                waker: AtomicWaker::new(),
                notified: AtomicU32::new(0),
            }
        }

        /// Marks the least significant bit (bit 0) in this `Notification` as notified.
        /// Returns `true` if there was a registered waker which got awoken.
        pub fn notify_lsb(&self) -> bool {
            self.notify(NonZeroU32::new(1).unwrap())
        }

        /// Marks the supplied bits in this `Notification` as notified.
        /// Returns `true` if there was a registered waker which got awoken.
        pub fn notify(&self, bits: NonZeroU32) -> bool {
            if let Some(waker) = self.notify_waker(bits) {
                waker.wake();

                true
            } else {
                false
            }
        }

        /// A utility to help in implementing a custom `wait` logic:
        /// Adds the supplied bits as notified in the notification instance and returns the registered waker (if any).
        pub fn notify_waker(&self, bits: NonZeroU32) -> Option<Waker> {
            self.notified.fetch_or(bits.into(), Ordering::SeqCst);

            self.waker.take()
        }

        /// Clears the state of this notification by removing any registered waker and setting all bits to 0.
        pub fn reset(&self) {
            self.waker.take();
            self.notified.store(0, Ordering::SeqCst);
        }

        /// Future that completes when this `Notification` has been notified.
        #[allow(unused)]
        pub fn wait(&self) -> impl Future<Output = NonZeroU32> + '_ {
            core::future::poll_fn(move |cx| self.poll_wait(cx))
        }

        /// Non-blocking method to check whether this notification has been notified.
        ///
        /// Registers the context's waker and returns `Ready` with the accumulated
        /// bits (clearing them) if any notification has arrived, `Pending` otherwise.
        pub fn poll_wait(&self, cx: &Context<'_>) -> Poll<NonZeroU32> {
            // Register the waker *before* consuming the bits: a concurrent `notify`
            // either sets bits we observe in the swap below, or runs after the swap
            // and finds the freshly registered waker to wake - so no wakeup is lost.
            self.waker.register(cx.waker());

            // Atomically consume (and clear) all accumulated notification bits.
            let bits = self.notified.swap(0, Ordering::SeqCst);

            if let Some(bits) = NonZeroU32::new(bits) {
                Poll::Ready(bits)
            } else {
                Poll::Pending
            }
        }
    }
1289
1290    impl Drop for Notification {
1291        fn drop(&mut self) {
1292            self.reset();
1293        }
1294    }
1295}