1use core::cell::Cell;
2use core::future::Future;
3use core::num::NonZeroU32;
4use core::pin::Pin;
5use core::ptr::{self, NonNull};
6use core::sync::atomic::{AtomicBool, Ordering};
7use core::task::{Context, Poll};
8
9#[cfg(feature = "alloc")]
10extern crate alloc;
11
12use esp_idf_sys::*;
13
14use crate::cpu::Core;
15use crate::interrupt;
16
// Sentinel core-affinity value passed to FreeRTOS when a task should not be
// pinned to a particular core. Newer ESP-IDF versions expose it as the
// `CONFIG_FREERTOS_NO_AFFINITY` sdkconfig constant, while v4.x / v5.0 / v5.1
// expose it as `tskNO_AFFINITY` (note the differing signedness of the two).
#[cfg(not(any(
    esp_idf_version_major = "4",
    esp_idf_version = "5.0",
    esp_idf_version = "5.1"
)))]
const NO_AFFINITY: core::ffi::c_int = CONFIG_FREERTOS_NO_AFFINITY as _;

#[cfg(any(
    esp_idf_version_major = "4",
    esp_idf_version = "5.0",
    esp_idf_version = "5.1"
))]
const NO_AFFINITY: core::ffi::c_uint = tskNO_AFFINITY;
30
31pub unsafe fn create(
44 task_handler: extern "C" fn(*mut core::ffi::c_void),
45 task_name: &core::ffi::CStr,
46 stack_size: usize,
47 task_arg: *mut core::ffi::c_void,
48 priority: u8,
49 pin_to_core: Option<Core>,
50) -> Result<TaskHandle_t, EspError> {
51 let mut task: TaskHandle_t = core::ptr::null_mut();
52
53 let created = xTaskCreatePinnedToCore(
54 Some(task_handler),
55 task_name.as_ptr(),
56 stack_size as _,
57 task_arg,
58 priority as _,
59 &mut task,
60 pin_to_core.map(Into::into).unwrap_or(NO_AFFINITY as _),
61 );
62
63 if created == 0 {
64 Err(EspError::from_infallible::<ESP_FAIL>())
65 } else {
66 Ok(task)
67 }
68}
69
/// Deletes the given FreeRTOS task.
///
/// # Safety
///
/// `task` must be a valid task handle, and deleting it must be safe at this
/// point (e.g. it must not hold resources that would leak or deadlock).
pub unsafe fn destroy(task: TaskHandle_t) {
    vTaskDelete(task)
}
85
86#[inline(always)]
87#[link_section = ".iram1.interrupt_task_do_yield"]
88pub fn do_yield() {
89 if interrupt::active() {
90 unsafe {
91 if let Some((yielder, arg)) = interrupt::get_isr_yielder() {
92 yielder(arg);
93 } else {
94 #[cfg(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6, esp32p4))]
95 vPortYieldFromISR();
96
97 #[cfg(all(
98 not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6, esp32p4)),
99 esp_idf_version_major = "4"
100 ))]
101 vPortEvaluateYieldFromISR(0);
102
103 #[cfg(all(
104 not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6, esp32c61, esp32p4)),
105 not(esp_idf_version_major = "4")
106 ))]
107 _frxt_setup_switch();
108 }
109 }
110 } else {
111 unsafe {
112 vPortYield();
113 }
114 }
115}
116
117#[inline(always)]
118#[link_section = ".iram1.interrupt_task_current"]
119pub fn current() -> Option<TaskHandle_t> {
120 if interrupt::active() {
121 None
122 } else {
123 Some(unsafe { xTaskGetCurrentTaskHandle() })
124 }
125}
126
127pub fn wait_notification(timeout: TickType_t) -> Option<NonZeroU32> {
128 let mut notification = 0_u32;
129
130 let notified =
131 unsafe { xTaskGenericNotifyWait(0, 0, u32::MAX, &mut notification, timeout) } != 0;
132
133 if notified {
134 NonZeroU32::new(notification)
135 } else {
136 None
137 }
138}
139
140pub unsafe fn notify_and_yield(task: TaskHandle_t, notification: NonZeroU32) -> bool {
146 let (notified, higher_prio_task_woken) = notify(task, notification);
147
148 if higher_prio_task_woken {
149 do_yield();
150 }
151
152 notified
153}
154
/// Sends `notification` to `task`, OR-ing the bits into the task's
/// notification value (`eSetBits`), without yielding.
///
/// Works from both task and ISR context, dispatching to the `...FromISR`
/// variant in the latter case.
///
/// Returns `(notified, higher_prio_task_woken)`; the second flag can only be
/// `true` in ISR context and signals that the caller should yield.
///
/// # Safety
///
/// `task` must be a valid FreeRTOS task handle.
pub unsafe fn notify(task: TaskHandle_t, notification: NonZeroU32) -> (bool, bool) {
    let (notified, higher_prio_task_woken) = if interrupt::active() {
        let mut higher_prio_task_woken: BaseType_t = Default::default();

        let notified = xTaskGenericNotifyFromISR(
            task,
            0, // index 0 of the task-notification array
            notification.into(),
            eNotifyAction_eSetBits, // OR the bits into the notification value
            ptr::null_mut(),        // previous value not needed
            &mut higher_prio_task_woken,
        );

        (notified, higher_prio_task_woken)
    } else {
        let notified = xTaskGenericNotify(
            task,
            0,
            notification.into(),
            eNotifyAction_eSetBits,
            ptr::null_mut(),
        );

        // Task context: a yield decision is left to the scheduler.
        (notified, 0)
    };

    (notified != 0, higher_prio_task_woken != 0)
}
188
/// Returns the handle of the idle task for the given CPU core.
///
/// # Panics
///
/// Panics (`unreachable!`) when a core other than `Core0` is requested on a
/// single-core chip.
pub fn get_idle_task(core: crate::cpu::Core) -> TaskHandle_t {
    // Single-core RISC-V chips: there is only one idle task.
    #[cfg(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6))]
    {
        if matches!(core, crate::cpu::Core::Core0) {
            unsafe { xTaskGetIdleTaskHandle() }
        } else {
            unreachable!()
        }
    }

    // Other chips on ESP-IDF v4 / 5.0 / 5.1: per-CPU getter.
    #[cfg(not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6)))]
    #[cfg(any(
        esp_idf_version_major = "4",
        esp_idf_version = "5.0",
        esp_idf_version = "5.1"
    ))]
    unsafe {
        xTaskGetIdleTaskHandleForCPU(core as u32)
    }

    // ESP-IDF >= 5.2 renamed the per-CPU getter (and takes a signed core id).
    #[cfg(not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6)))]
    #[cfg(not(any(
        esp_idf_version_major = "4",
        esp_idf_version = "5.0",
        esp_idf_version = "5.1"
    )))]
    unsafe {
        xTaskGetIdleTaskHandleForCore(core as i32)
    }
}
219
/// Drives the future `fut` to completion on the current FreeRTOS task,
/// parking the task (via a task notification) whenever the future is pending.
#[cfg(feature = "alloc")]
pub fn block_on<F>(fut: F) -> F::Output
where
    F: Future,
{
    ::log::trace!("block_on(): started");

    // The notifier doubles as the waker: waking posts a task notification
    // to the task that is blocked here.
    let notification = notification::Notification::new();
    let waker = notification.notifier().into();
    let mut cx = Context::from_waker(&waker);

    let mut fut = core::pin::pin!(fut);

    let result = loop {
        if let Poll::Ready(out) = fut.as_mut().poll(&mut cx) {
            break out;
        }

        // Pending: block until the waker notifies this task.
        notification.wait_any();
    };

    ::log::trace!("block_on(): finished");

    result
}
247
/// Returns a future that yields to the executor exactly once before
/// completing (pending on the first poll, ready on the second).
pub fn yield_now() -> impl Future<Output = ()> {
    YieldNowFuture { yielded: false }
}

/// Future backing [`yield_now`]; tracks whether the single yield happened.
#[must_use = "futures do nothing unless you `.await` or poll them"]
struct YieldNowFuture {
    // `false` until the first poll has scheduled a wake and returned Pending.
    yielded: bool,
}
274
275impl Future for YieldNowFuture {
276 type Output = ();
277 fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
278 if self.yielded {
279 Poll::Ready(())
280 } else {
281 self.yielded = true;
282 cx.waker().wake_by_ref();
283 Poll::Pending
284 }
285 }
286}
287
288#[cfg(esp_idf_comp_pthread_enabled)]
289pub mod thread {
290 use core::ffi::CStr;
291
292 use enumset::EnumSetType;
293
294 #[cfg(not(any(
295 esp_idf_version_major = "4",
296 all(esp_idf_version_major = "5", esp_idf_version_minor = "0"),
297 all(esp_idf_version_major = "5", esp_idf_version_minor = "1"),
298 all(esp_idf_version_major = "5", esp_idf_version_minor = "2"),
299 )))] use enumset::EnumSet;
301
302 use esp_idf_sys::*;
303
304 use super::NO_AFFINITY;
305
306 use crate::cpu::Core;
307
    /// Memory-capability flags for thread stack allocation; the variant values
    /// are bit positions, presumably mirroring the ESP-IDF `MALLOC_CAP_*`
    /// flags (the set is handed to ESP-IDF as a `u32` mask — see the
    /// `esp_pthread_cfg_t` conversion below).
    #[derive(Debug, EnumSetType)]
    #[enumset(repr = "u32")]
    pub enum MallocCap {
        Exec = 0,
        Cap32bit = 1,
        Cap8bit = 2,
        Dma = 3,
        Pid2 = 4,
        Pid3 = 5,
        Pid4 = 6,
        Pid5 = 7,
        Pid6 = 8,
        Pid7 = 9,
        Spiram = 10,
        Internal = 11,
        Default = 12,
        Iram8bit = 13,
        Retention = 14,
        Rtcram = 15,
        Tcm = 16,
        Invalid = 31,
    }

    /// Configuration applied to subsequently spawned threads via ESP-IDF's
    /// pthread layer (which backs `std::thread` on the ESP-IDF platform).
    #[derive(Debug)]
    pub struct ThreadSpawnConfiguration {
        /// Thread name; `None` keeps the platform default.
        pub name: Option<&'static CStr>,
        /// Stack size for new threads.
        pub stack_size: usize,
        /// FreeRTOS priority for new threads.
        pub priority: u8,
        /// Whether child threads inherit this configuration.
        pub inherit: bool,
        /// Core affinity; `None` means no pinning.
        pub pin_to_core: Option<Core>,
        /// Heap capabilities used for stack allocation (ESP-IDF >= 5.3 only).
        #[cfg(not(any(
            esp_idf_version_major = "4",
            all(esp_idf_version_major = "5", esp_idf_version_minor = "0"),
            all(esp_idf_version_major = "5", esp_idf_version_minor = "1"),
            all(esp_idf_version_major = "5", esp_idf_version_minor = "2"),
        )))]
        pub stack_alloc_caps: EnumSet<MallocCap>,
    }
367
    impl ThreadSpawnConfiguration {
        /// Returns the currently installed configuration, if any was set.
        pub fn get() -> Option<Self> {
            get_conf()
        }

        /// Installs this configuration for threads spawned afterwards.
        pub fn set(&self) -> Result<(), EspError> {
            set_conf(self)
        }
    }

    impl Default for ThreadSpawnConfiguration {
        // The default mirrors ESP-IDF's compiled-in pthread defaults.
        fn default() -> Self {
            get_default_conf()
        }
    }

    /// Conversion into the native ESP-IDF pthread configuration struct.
    impl From<&ThreadSpawnConfiguration> for esp_pthread_cfg_t {
        fn from(conf: &ThreadSpawnConfiguration) -> Self {
            #[allow(clippy::unwrap_or_default)]
            Self {
                // Null pointer means "no name" on the native side.
                thread_name: conf
                    .name
                    .map(|name| name.as_ptr() as _)
                    .unwrap_or(core::ptr::null()),
                stack_size: conf.stack_size as _,
                prio: conf.priority as _,
                inherit_cfg: conf.inherit,
                pin_to_core: conf.pin_to_core.map(Into::into).unwrap_or(NO_AFFINITY as _),
                #[cfg(not(any(
                    esp_idf_version_major = "4",
                    esp_idf_version = "5.0",
                    esp_idf_version = "5.1",
                    esp_idf_version = "5.2",
                )))]
                stack_alloc_caps: conf.stack_alloc_caps.as_u32(),
            }
        }
    }
406
    /// Conversion back from the native ESP-IDF pthread configuration struct.
    impl From<esp_pthread_cfg_t> for ThreadSpawnConfiguration {
        fn from(conf: esp_pthread_cfg_t) -> Self {
            Self {
                // A null name pointer maps back to `None`.
                name: if conf.thread_name.is_null() {
                    None
                } else {
                    Some(unsafe { CStr::from_ptr(conf.thread_name) })
                },
                stack_size: conf.stack_size as _,
                priority: conf.prio as _,
                inherit: conf.inherit_cfg,
                // The NO_AFFINITY sentinel maps back to `None`.
                pin_to_core: if conf.pin_to_core == NO_AFFINITY as _ {
                    None
                } else {
                    Some(conf.pin_to_core.into())
                },
                #[cfg(not(any(
                    esp_idf_version_major = "4",
                    all(esp_idf_version_major = "5", esp_idf_version_minor = "0"),
                    all(esp_idf_version_major = "5", esp_idf_version_minor = "1"),
                    all(esp_idf_version_major = "5", esp_idf_version_minor = "2"),
                )))]
                stack_alloc_caps: EnumSet::<MallocCap>::from_u32(conf.stack_alloc_caps),
            }
        }
    }

    /// Fetches ESP-IDF's compiled-in default pthread configuration.
    fn get_default_conf() -> ThreadSpawnConfiguration {
        unsafe { esp_pthread_get_default_config() }.into()
    }
437
438 fn get_conf() -> Option<ThreadSpawnConfiguration> {
439 let mut conf: esp_pthread_cfg_t = Default::default();
440
441 let res = unsafe { esp_pthread_get_cfg(&mut conf as _) };
442
443 if res == ESP_ERR_NOT_FOUND {
444 None
445 } else {
446 Some(conf.into())
447 }
448 }
449
450 fn set_conf(conf: &ThreadSpawnConfiguration) -> Result<(), EspError> {
451 if conf.priority < 1 || conf.priority as u32 >= configMAX_PRIORITIES {
452 panic!("Thread priority {} has to be [1 - 24]", conf.priority);
453 }
454
455 esp!(unsafe { esp_pthread_set_cfg(&conf.into()) })?;
456
457 Ok(())
458 }
459}
460
/// A lazily-initialized, recursive critical section backed by a FreeRTOS
/// recursive mutex.
///
/// Field 0 holds the mutex handle once created; field 1 records whether
/// lazy initialization has run.
pub struct CriticalSection(Cell<Option<NonNull<QueueDefinition>>>, AtomicBool);

// FreeRTOS queue type for recursive mutexes (`queueQUEUE_TYPE_RECURSIVE_MUTEX`).
const QUEUE_TYPE_RECURSIVE_MUTEX: u8 = 4;

/// Lazily creates the underlying recursive mutex (double-checked, with the
/// second check under `interrupt::free` so only one winner allocates) and
/// then takes it, blocking indefinitely.
#[inline(always)]
#[link_section = ".iram1.cs_enter"]
fn enter(cs: &CriticalSection) {
    if !cs.1.load(Ordering::SeqCst) {
        interrupt::free(|| {
            // Re-check inside the interrupt-free section: another racer may
            // have initialized the mutex between the outer check and here.
            if !cs.1.load(Ordering::SeqCst) {
                let ptr = unsafe { xQueueCreateMutex(QUEUE_TYPE_RECURSIVE_MUTEX) };
                // NOTE(review): if allocation failed (`ptr` null), the flag is
                // still set below and the `unwrap()` further down panics —
                // confirm whether failing fast here would be preferable.
                cs.0.set(NonNull::new(ptr));
                cs.1.store(true, Ordering::SeqCst);
            }
        });
    }

    let res =
        unsafe { xQueueTakeMutexRecursive(cs.0.get().unwrap().as_ptr(), crate::delay::BLOCK) } != 0;

    // Taking with an infinite timeout cannot fail under normal operation.
    if !res {
        unreachable!();
    }
}

/// Releases one level of the recursive mutex taken by `enter()`.
///
/// # Panics
///
/// Panics when called without a prior matching `enter()` (i.e. before the
/// mutex was ever created).
#[inline(always)]
#[link_section = ".iram1.cs_exit"]
fn exit(cs: &CriticalSection) {
    if !cs.1.load(Ordering::SeqCst) {
        panic!("Called exit() without matching enter()");
    }

    let res = unsafe { xQueueGiveMutexRecursive(cs.0.get().unwrap().as_ptr()) } != 0;

    if !res {
        unreachable!();
    }
}
500
impl CriticalSection {
    /// Constructs a new, not-yet-initialized critical section.
    ///
    /// The underlying FreeRTOS mutex is created lazily on first `enter()`,
    /// which is what allows this constructor to be `const` (usable in
    /// `static` items).
    #[inline(always)]
    pub const fn new() -> Self {
        Self(Cell::new(None), AtomicBool::new(false))
    }

    /// Enters the critical section; the returned guard releases it on drop.
    #[inline(always)]
    pub fn enter(&self) -> CriticalSectionGuard<'_> {
        enter(self);

        CriticalSectionGuard(self)
    }
}

impl Drop for CriticalSection {
    fn drop(&mut self) {
        // Only delete the mutex if lazy initialization ever ran.
        if self.1.load(Ordering::SeqCst) {
            unsafe {
                vQueueDelete(self.0.get().unwrap().as_ptr());
            }
        }
    }
}

impl Default for CriticalSection {
    #[inline(always)]
    fn default() -> Self {
        Self::new()
    }
}

// SAFETY: the inner `Cell` is written only once, inside `interrupt::free`
// (see `enter` above); afterwards the handle is only passed to FreeRTOS calls.
unsafe impl Send for CriticalSection {}
unsafe impl Sync for CriticalSection {}

/// RAII guard returned by [`CriticalSection::enter`]; releases the critical
/// section when dropped.
pub struct CriticalSectionGuard<'a>(&'a CriticalSection);

impl Drop for CriticalSectionGuard<'_> {
    #[inline(always)]
    fn drop(&mut self) {
        exit(self.0);
    }
}
544
545pub mod watchdog {
546 use core::{
572 marker::PhantomData,
573 sync::atomic::{AtomicUsize, Ordering},
574 };
575
576 use esp_idf_sys::*;
577
    /// Convenience alias for the Task Watchdog Timer configuration.
    pub type TWDTConfig = config::Config;
579
    pub mod config {

        #[cfg(not(esp_idf_version_major = "4"))]
        use esp_idf_sys::*;

        /// Task Watchdog Timer configuration.
        #[derive(Clone)]
        pub struct Config {
            /// Watchdog timeout.
            pub duration: core::time::Duration,
            /// Whether the watchdog panics (instead of merely reporting) when
            /// it triggers.
            pub panic_on_trigger: bool,
            /// Which cores' idle tasks are subscribed to the watchdog.
            pub subscribed_idle_tasks: enumset::EnumSet<crate::cpu::Core>,
        }

        impl Config {
            /// Builds a configuration mirroring the `CONFIG_ESP_TASK_WDT_*`
            /// sdkconfig options compiled into the firmware.
            pub fn new() -> Self {
                // Timeout comes from sdkconfig when the TWDT is enabled there;
                // otherwise fall back to 5 seconds.
                #[cfg(esp_idf_esp_task_wdt)]
                let duration = core::time::Duration::from_secs(
                    esp_idf_sys::CONFIG_ESP_TASK_WDT_TIMEOUT_S as u64,
                );
                #[cfg(not(esp_idf_esp_task_wdt))]
                let duration = core::time::Duration::from_secs(5);
                Self {
                    duration,
                    panic_on_trigger: cfg!(esp_idf_esp_task_wdt_panic),
                    subscribed_idle_tasks: {
                        let mut subscribed_idle_tasks = enumset::EnumSet::empty();
                        if cfg!(esp_idf_esp_task_wdt_check_idle_task_cpu0) {
                            subscribed_idle_tasks |= crate::cpu::Core::Core0;
                        }
                        // Core1 only exists on the dual-core chips.
                        #[cfg(any(esp32, esp32s3))]
                        if cfg!(esp_idf_esp_task_wdt_check_idle_task_cpu1) {
                            subscribed_idle_tasks |= crate::cpu::Core::Core1;
                        }
                        subscribed_idle_tasks
                    },
                }
            }
        }

        impl Default for Config {
            fn default() -> Self {
                Self::new()
            }
        }

        /// Conversion into the native ESP-IDF v5+ TWDT configuration struct.
        #[cfg(not(esp_idf_version_major = "4"))]
        impl From<&Config> for esp_task_wdt_config_t {
            fn from(config: &Config) -> Self {
                esp_task_wdt_config_t {
                    timeout_ms: config.duration.as_millis() as u32,
                    trigger_panic: config.panic_on_trigger,
                    idle_core_mask: config.subscribed_idle_tasks.as_u32(),
                }
            }
        }
    }
636
    /// Driver for the Task Watchdog Timer (TWDT).
    ///
    /// Instances are reference-counted via [`Clone`]; the watchdog is
    /// deinitialized when the last instance is dropped — unless ESP-IDF
    /// itself initialized it at boot.
    pub struct TWDTDriver<'d> {
        // Whether the TWDT was already initialized by ESP-IDF at startup,
        // in which case this driver must not deinitialize it.
        init_by_idf: bool,
        _marker: PhantomData<&'d mut ()>,
    }

    // Number of live `TWDTDriver` instances (originals plus clones).
    static TWDT_DRIVER_REF_COUNT: AtomicUsize = AtomicUsize::new(0);
643
644 impl<'d> TWDTDriver<'d> {
645 pub fn new(_twdt: TWDT<'d>, config: &config::Config) -> Result<Self, EspError> {
646 TWDT_DRIVER_REF_COUNT.fetch_add(1, Ordering::SeqCst);
647 let init_by_idf = Self::watchdog_is_init_by_idf();
648
649 #[cfg(not(esp_idf_version_major = "4"))]
650 if !init_by_idf {
651 esp!(unsafe { esp_task_wdt_init(&config.into() as *const esp_task_wdt_config_t) })?;
652 } else {
653 esp!(unsafe {
654 esp_task_wdt_reconfigure(&config.into() as *const esp_task_wdt_config_t)
655 })?;
656 }
657
658 #[cfg(esp_idf_version_major = "4")]
659 esp!(unsafe {
660 esp_task_wdt_init(config.duration.as_secs() as u32, config.panic_on_trigger)
661 })?;
662
663 #[cfg(esp_idf_version_major = "4")]
664 if let Err(e) = Self::subscribe_idle_tasks(config.subscribed_idle_tasks) {
665 if e.code() != ESP_ERR_INVALID_ARG {
667 return Err(e);
668 }
669 }
670
671 Ok(Self {
672 init_by_idf,
673 _marker: Default::default(),
674 })
675 }
676
677 pub fn watch_current_task(&mut self) -> Result<WatchdogSubscription<'_>, EspError> {
678 esp!(unsafe { esp_task_wdt_add(core::ptr::null_mut()) })?;
679 Ok(WatchdogSubscription::new())
680 }
681
682 #[cfg(esp_idf_version_major = "4")]
683 fn subscribe_idle_tasks(cores: enumset::EnumSet<crate::cpu::Core>) -> Result<(), EspError> {
684 for core in cores {
685 let task = super::get_idle_task(core);
686 esp!(unsafe { esp_task_wdt_add(task) })?;
687 }
688
689 Ok(())
690 }
691
692 #[cfg(esp_idf_version_major = "4")]
693 fn unsubscribe_idle_tasks() -> Result<(), EspError> {
694 for core in enumset::EnumSet::<crate::cpu::Core>::all() {
695 let task = super::get_idle_task(core);
696 esp!(unsafe { esp_task_wdt_delete(task) })?;
697 }
698
699 Ok(())
700 }
701
702 fn watchdog_is_init_by_idf() -> bool {
703 if cfg!(not(any(
704 esp_idf_version_major = "4",
705 esp_idf_version = "5.0"
706 ))) {
707 cfg!(esp_idf_esp_task_wdt_init)
708 } else {
709 !matches!(
710 unsafe { esp_task_wdt_status(core::ptr::null_mut()) },
711 ESP_ERR_INVALID_STATE
712 )
713 }
714 }
715
716 fn deinit(&self) -> Result<(), EspError> {
717 if !self.init_by_idf {
718 #[cfg(esp_idf_version_major = "4")]
719 if let Err(e) = Self::unsubscribe_idle_tasks() {
720 if e.code() != ESP_ERR_INVALID_ARG {
722 return Err(e);
723 }
724 }
725 esp!(unsafe { esp_task_wdt_deinit() }).unwrap();
726 }
727
728 Ok(())
729 }
730 }
731
    impl Clone for TWDTDriver<'_> {
        fn clone(&self) -> Self {
            // Each clone holds one reference; the TWDT is torn down only when
            // the last one is dropped.
            TWDT_DRIVER_REF_COUNT.fetch_add(1, Ordering::SeqCst);
            Self {
                init_by_idf: self.init_by_idf,
                _marker: Default::default(),
            }
        }
    }

    impl Drop for TWDTDriver<'_> {
        fn drop(&mut self) {
            // `fetch_sub` returns the PREVIOUS value: 1 means this was the
            // last live reference.
            let refcnt = TWDT_DRIVER_REF_COUNT.fetch_sub(1, Ordering::SeqCst);
            match refcnt {
                1 => self.deinit().unwrap(),
                // A previous value of 0 would mean more drops than creations.
                r if r < 1 => unreachable!(),
                _ => (),
            }
        }
    }

    // SAFETY: the driver itself only carries a bool flag; the watchdog state
    // lives inside ESP-IDF.
    unsafe impl Send for TWDTDriver<'_> {}

    /// Subscription of the current task to the TWDT; it must be fed
    /// periodically (or dropped) before the watchdog timeout elapses.
    pub struct WatchdogSubscription<'s>(PhantomData<&'s mut ()>);

    impl WatchdogSubscription<'_> {
        fn new() -> Self {
            Self(Default::default())
        }

        /// Resets (feeds) the watchdog on behalf of the current task.
        pub fn feed(&mut self) -> Result<(), EspError> {
            esp!(unsafe { esp_task_wdt_reset() })
        }
    }

    impl embedded_hal_0_2::watchdog::Watchdog for WatchdogSubscription<'_> {
        fn feed(&mut self) {
            Self::feed(self).unwrap()
        }
    }

    impl Drop for WatchdogSubscription<'_> {
        fn drop(&mut self) {
            // Unsubscribe the current task (null = "current") from the TWDT.
            esp!(unsafe { esp_task_wdt_delete(core::ptr::null_mut()) }).unwrap();
        }
    }
778
779 crate::impl_peripheral!(TWDT);
780}
781
#[cfg(feature = "critical-section")]
pub mod critical_section {
    // Process-wide critical section backing the `critical-section` crate impl.
    static CS: super::CriticalSection = super::CriticalSection::new();

    /// `critical-section` crate implementation delegating to the
    /// FreeRTOS-recursive-mutex-based [`CriticalSection`](super::CriticalSection).
    pub struct EspCriticalSection {}

    unsafe impl critical_section::Impl for EspCriticalSection {
        unsafe fn acquire() {
            super::enter(&CS);
        }

        unsafe fn release(_token: ()) {
            super::exit(&CS);
        }
    }

    pub type LinkWorkaround = [*mut (); 2];

    // Keeps the `_critical_section_1_0_*` symbols referenced so the linker
    // does not discard them (see `link()` below).
    static mut __INTERNAL_REFERENCE: LinkWorkaround = [
        _critical_section_1_0_acquire as *mut _,
        _critical_section_1_0_release as *mut _,
    ];

    /// Forces the `critical-section` implementation symbols to be linked in;
    /// call from application code as a dead-symbol-stripping workaround.
    pub fn link() -> LinkWorkaround {
        unsafe { __INTERNAL_REFERENCE }
    }

    critical_section::set_impl!(EspCriticalSection);
}
811
pub mod embassy_sync {
    use embassy_sync::blocking_mutex::raw::RawMutex;

    /// An `embassy-sync` `RawMutex` implementation backed by
    /// [`CriticalSection`](super::CriticalSection) (a FreeRTOS recursive mutex).
    pub struct EspRawMutex(super::CriticalSection);

    // SAFETY: delegates entirely to `CriticalSection`, which is `Send`/`Sync`.
    unsafe impl Send for EspRawMutex {}
    unsafe impl Sync for EspRawMutex {}

    impl Default for EspRawMutex {
        fn default() -> Self {
            Self::new()
        }
    }

    impl EspRawMutex {
        /// Creates a new (lazily-initialized) mutex; `const`, so usable in statics.
        pub const fn new() -> Self {
            Self(super::CriticalSection::new())
        }
    }

    unsafe impl RawMutex for EspRawMutex {
        #[allow(clippy::declare_interior_mutable_const)]
        const INIT: Self = Self::new();

        fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
            // Hold the critical section for the whole duration of `f`.
            let _guard = self.0.enter();

            f()
        }
    }
}
849
850#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
851pub mod notification {
852 use core::marker::PhantomData;
853 use core::num::NonZeroU32;
854 use core::sync::atomic::{AtomicPtr, Ordering};
855
856 extern crate alloc;
857 use alloc::sync::Arc;
858 use alloc::task::Wake;
859
860 use esp_idf_sys::TickType_t;
861
862 use crate::task;
863
    // Pointee type of a FreeRTOS task handle: the v4 bindings use a plain
    // `*mut c_void`, while v5+ expose the (opaque) `tskTaskControlBlock`.
    #[cfg(esp_idf_version_major = "4")]
    type Task = core::ffi::c_void;

    #[cfg(not(esp_idf_version_major = "4"))]
    type Task = esp_idf_sys::tskTaskControlBlock;
869
    /// A notification primitive bound to the FreeRTOS task that created it.
    ///
    /// The `PhantomData<*const ()>` makes this type `!Send`/`!Sync`, so only
    /// the creating task can wait on it; cloneable, `Send` [`Notifier`]
    /// handles are obtained via [`notifier`](Self::notifier).
    pub struct Notification(Arc<Notifier>, PhantomData<*const ()>);

    impl Notification {
        /// Creates a notification bound to the current task.
        ///
        /// # Panics
        ///
        /// Panics when called from an ISR context (where `task::current()`
        /// returns `None`).
        pub fn new() -> Self {
            Self(
                Arc::new(Notifier(AtomicPtr::new(task::current().unwrap()))),
                PhantomData,
            )
        }

        /// Returns a cloneable handle that can notify this task from anywhere.
        pub fn notifier(&self) -> Arc<Notifier> {
            self.0.clone()
        }

        /// Blocks until at least one notification bit arrives.
        pub fn wait_any(&self) {
            loop {
                if task::wait_notification(crate::delay::BLOCK).is_some() {
                    break;
                }
            }
        }

        /// Waits up to `timeout` ticks; returns the received bits, if any.
        pub fn wait(&self, timeout: TickType_t) -> Option<NonZeroU32> {
            task::wait_notification(timeout)
        }
    }

    impl Default for Notification {
        fn default() -> Self {
            Self::new()
        }
    }

    /// The `Send`/`Sync` half of [`Notification`]: stores the target task's
    /// handle and posts FreeRTOS task notifications to it.
    pub struct Notifier(AtomicPtr<Task>);
904
905 impl Notifier {
906 pub unsafe fn notify(&self, notification: NonZeroU32) -> (bool, bool) {
914 let freertos_task = self.0.load(Ordering::SeqCst);
915
916 if !freertos_task.is_null() {
917 return unsafe { task::notify(freertos_task, notification) };
918 }
919
920 (false, false)
921 }
922
923 pub unsafe fn notify_and_yield(&self, notification: NonZeroU32) -> bool {
931 let freertos_task = self.0.load(Ordering::SeqCst);
932
933 if !freertos_task.is_null() {
934 unsafe { task::notify_and_yield(freertos_task, notification) }
935 } else {
936 false
937 }
938 }
939 }
940
    impl Wake for Notifier {
        fn wake(self: Arc<Self>) {
            // Waking posts bit 0 to the owning task and yields if needed.
            // NOTE(review): assumes the owning task is still alive — confirm
            // the lifetime guarantees of the surrounding executor usage.
            unsafe {
                self.notify_and_yield(NonZeroU32::new(1).unwrap());
            }
        }
    }
948}
949
950pub mod queue {
951 use core::{
952 marker::PhantomData,
953 mem::{size_of, MaybeUninit},
954 };
955
956 use esp_idf_sys::{EspError, TickType_t, ESP_FAIL};
957
958 use crate::sys;
959
    /// A typed wrapper around a FreeRTOS queue holding fixed-size `T` items.
    ///
    /// `T` must be `Copy`: items are copied byte-wise into and out of the
    /// queue by FreeRTOS.
    pub struct Queue<T> {
        ptr: sys::QueueHandle_t,
        // Whether this wrapper created (and therefore must delete) the handle.
        is_owned: bool,
        _marker: PhantomData<T>,
    }

    // SAFETY: the handle is only used through FreeRTOS queue calls; the
    // `T: Send + Sync` bounds ensure the transported items may cross tasks.
    unsafe impl<T> Send for Queue<T> where T: Send + Sync {}
    unsafe impl<T> Sync for Queue<T> where T: Send + Sync {}
973
974 impl<T> Queue<T>
975 where
976 T: Copy,
979 {
980 pub fn new(size: usize) -> Self {
982 Queue {
983 ptr: unsafe { sys::xQueueGenericCreate(size as u32, size_of::<T>() as u32, 0) },
984 is_owned: true,
985 _marker: PhantomData,
986 }
987 }
988
989 pub unsafe fn new_borrowed(ptr: sys::QueueHandle_t) -> Self {
996 assert!(!ptr.is_null());
997
998 Queue {
999 ptr,
1000 is_owned: false,
1001 _marker: PhantomData,
1002 }
1003 }
1004
1005 #[inline]
1007 pub fn as_raw(&self) -> sys::QueueHandle_t {
1008 self.ptr
1009 }
1010
1011 #[inline]
1030 pub fn send_back(&self, item: T, timeout: TickType_t) -> Result<bool, EspError> {
1031 self.send_generic(item, timeout, 0)
1032 }
1033
1034 #[inline]
1055 pub fn send_front(&self, item: T, timeout: TickType_t) -> Result<bool, EspError> {
1056 self.send_generic(item, timeout, 1)
1057 }
1058
1059 #[inline]
1079 fn send_generic(
1080 &self,
1081 item: T,
1082 timeout: TickType_t,
1083 copy_position: i32,
1084 ) -> Result<bool, EspError> {
1085 let mut hp_task_awoken: i32 = false as i32;
1086 let success = unsafe {
1087 if crate::interrupt::active() {
1088 sys::xQueueGenericSendFromISR(
1089 self.ptr,
1090 &item as *const T as *const _,
1091 &mut hp_task_awoken,
1092 copy_position,
1093 )
1094 } else {
1095 sys::xQueueGenericSend(
1096 self.ptr,
1097 &item as *const T as *const _,
1098 timeout,
1099 copy_position,
1100 )
1101 }
1102 };
1103 let success = success == 1;
1104 let hp_task_awoken = hp_task_awoken == 1;
1105
1106 match success {
1107 true => Ok(hp_task_awoken),
1108 false => Err(EspError::from_infallible::<ESP_FAIL>()),
1109 }
1110 }
1111
1112 #[inline]
1131 pub fn recv_front(&self, timeout: TickType_t) -> Option<(T, bool)> {
1132 let mut buf = MaybeUninit::uninit();
1133 let mut hp_task_awoken = false as i32;
1134
1135 unsafe {
1136 let success = if crate::interrupt::active() {
1137 sys::xQueueReceiveFromISR(
1138 self.ptr,
1139 buf.as_mut_ptr() as *mut _,
1140 &mut hp_task_awoken,
1141 )
1142 } else {
1143 sys::xQueueReceive(self.ptr, buf.as_mut_ptr() as *mut _, timeout)
1144 };
1145 if success == 1 {
1146 Some((buf.assume_init(), hp_task_awoken == 1))
1147 } else {
1148 None
1149 }
1150 }
1151 }
1152
1153 #[inline]
1172 pub fn peek_front(&self, timeout: TickType_t) -> Option<T> {
1173 let mut buf = MaybeUninit::uninit();
1174
1175 unsafe {
1176 let success = if crate::interrupt::active() {
1177 sys::xQueuePeekFromISR(self.ptr, buf.as_mut_ptr() as *mut _)
1178 } else {
1179 sys::xQueuePeek(self.ptr, buf.as_mut_ptr() as *mut _, timeout)
1180 };
1181 if success == 1 {
1182 Some(buf.assume_init())
1183 } else {
1184 None
1185 }
1186 }
1187 }
1188 }
1189
    impl<T> Drop for Queue<T> {
        fn drop(&mut self) {
            // Delete the handle only if we created it (`new`), not when it
            // was merely borrowed (`new_borrowed`).
            if self.is_owned {
                unsafe { sys::vQueueDelete(self.ptr) }
            }
        }
    }
1197}
1198
pub mod asynch {
    use core::future::Future;
    use core::num::NonZeroU32;
    use core::sync::atomic::{AtomicU32, Ordering};
    use core::task::{Context, Poll, Waker};

    use atomic_waker::AtomicWaker;

    /// An executor-agnostic async notification: a 32-bit set of pending
    /// notification bits plus at most one registered waker.
    pub struct Notification {
        // Waker of the (single) task currently awaiting `wait()`.
        waker: AtomicWaker,
        // Accumulated notification bits not yet consumed by a waiter.
        notified: AtomicU32,
    }

    impl Default for Notification {
        fn default() -> Self {
            Self::new()
        }
    }

    impl Notification {
        /// Creates an empty notification (`const`, usable in statics).
        pub const fn new() -> Self {
            Self {
                waker: AtomicWaker::new(),
                notified: AtomicU32::new(0),
            }
        }

        /// Posts bit 0; returns `true` if a waiting task was woken.
        pub fn notify_lsb(&self) -> bool {
            self.notify(NonZeroU32::new(1).unwrap())
        }

        /// ORs `bits` into the pending set and wakes the waiter, if any.
        ///
        /// Returns `true` if a waker was registered (and therefore woken).
        pub fn notify(&self, bits: NonZeroU32) -> bool {
            if let Some(waker) = self.notify_waker(bits) {
                waker.wake();

                true
            } else {
                false
            }
        }

        /// ORs `bits` into the pending set and returns the registered waker
        /// WITHOUT waking it, letting the caller decide when/where to wake.
        pub fn notify_waker(&self, bits: NonZeroU32) -> Option<Waker> {
            self.notified.fetch_or(bits.into(), Ordering::SeqCst);

            self.waker.take()
        }

        /// Clears any pending bits and drops the registered waker.
        pub fn reset(&self) {
            self.waker.take();
            self.notified.store(0, Ordering::SeqCst);
        }

        /// Resolves once at least one bit is pending, yielding and clearing
        /// the pending bits.
        #[allow(unused)]
        pub fn wait(&self) -> impl Future<Output = NonZeroU32> + '_ {
            core::future::poll_fn(move |cx| self.poll_wait(cx))
        }

        /// Poll form of [`wait`](Self::wait).
        ///
        /// The waker is registered BEFORE the pending bits are checked, so a
        /// `notify` racing with this poll either sees the stored waker or its
        /// bits are observed by the swap below — a wakeup cannot be lost.
        pub fn poll_wait(&self, cx: &Context<'_>) -> Poll<NonZeroU32> {
            self.waker.register(cx.waker());

            let bits = self.notified.swap(0, Ordering::SeqCst);

            if let Some(bits) = NonZeroU32::new(bits) {
                Poll::Ready(bits)
            } else {
                Poll::Pending
            }
        }
    }

    impl Drop for Notification {
        fn drop(&mut self) {
            self.reset();
        }
    }
}