use core::cell::Cell;
use core::future::Future;
use core::num::NonZeroU32;
use core::pin::Pin;
use core::ptr::{self, NonNull};
use core::sync::atomic::{AtomicBool, Ordering};
use core::task::{Context, Poll};

#[cfg(feature = "alloc")]
extern crate alloc;

use esp_idf_sys::*;

use crate::cpu::Core;
use crate::interrupt;

#[cfg(not(any(
    esp_idf_version_major = "4",
    esp_idf_version = "5.0",
    esp_idf_version = "5.1"
)))]
const NO_AFFINITY: core::ffi::c_int = CONFIG_FREERTOS_NO_AFFINITY as _;

#[cfg(any(
    esp_idf_version_major = "4",
    esp_idf_version = "5.0",
    esp_idf_version = "5.1"
))]
const NO_AFFINITY: core::ffi::c_uint = tskNO_AFFINITY;

/// Creates a FreeRTOS task.
///
/// This API is to be used only for niche use cases, such as when the `std` feature is not enabled, or when one
/// absolutely needs to create a raw FreeRTOS task.
///
/// In all other cases, the standard, safe Rust `std::thread` API should be utilized, as it is a thin
/// wrapper around the FreeRTOS task API anyway.
///
/// # Safety
///
/// Only marked as unsafe for symmetry with `destroy` and to discourage users from leaning on it in favor of `std::thread`.
/// Otherwise, this function is actually safe.
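///
/// # Example
///
/// A minimal sketch; `task_main` is a hypothetical task handler:
///
/// ```rust,ignore
/// extern "C" fn task_main(_arg: *mut core::ffi::c_void) {
///     loop {
///         // ... do some work, then let other tasks run
///         esp_idf_hal::task::do_yield();
///     }
/// }
///
/// let task = unsafe {
///     esp_idf_hal::task::create(
///         task_main,
///         core::ffi::CStr::from_bytes_with_nul(b"task_main\0").unwrap(),
///         4096,                  // stack size in bytes
///         core::ptr::null_mut(), // no task argument
///         5,                     // priority
///         None,                  // no core affinity
///     )
///     .unwrap()
/// };
/// ```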
pub unsafe fn create(
    task_handler: extern "C" fn(*mut core::ffi::c_void),
    task_name: &core::ffi::CStr,
    stack_size: usize,
    task_arg: *mut core::ffi::c_void,
    priority: u8,
    pin_to_core: Option<Core>,
) -> Result<TaskHandle_t, EspError> {
    let mut task: TaskHandle_t = core::ptr::null_mut();

    let created = xTaskCreatePinnedToCore(
        Some(task_handler),
        task_name.as_ptr(),
        stack_size as _,
        task_arg,
        priority as _,
        &mut task,
        pin_to_core.map(Into::into).unwrap_or(NO_AFFINITY as _),
    );

    if created == 0 {
        Err(EspError::from_infallible::<ESP_FAIL>())
    } else {
        Ok(task)
    }
}

/// Deletes a FreeRTOS task.
///
/// This API is to be used only for niche use cases, such as when the `std` feature is not enabled, or when one
/// absolutely needs to delete a raw FreeRTOS task.
///
/// In all other cases, the standard, safe Rust `std::thread` API should be utilized, as it is a thin
/// wrapper around the FreeRTOS task API anyway.
///
/// # Safety
///
/// A valid `TaskHandle_t` instance of an existing task should be provided.
/// Providing a `TaskHandle_t` of a task which was already destroyed is undefined behavior.
pub unsafe fn destroy(task: TaskHandle_t) {
    vTaskDelete(task)
}

#[inline(always)]
#[link_section = ".iram1.interrupt_task_do_yield"]
/// Yields execution to the FreeRTOS scheduler, from either task or ISR context.
pub fn do_yield() {
    if interrupt::active() {
        unsafe {
            if let Some((yielder, arg)) = interrupt::get_isr_yielder() {
                yielder(arg);
            } else {
                #[cfg(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6))]
                vPortYieldFromISR();

                #[cfg(all(
                    not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6)),
                    esp_idf_version_major = "4"
                ))]
                vPortEvaluateYieldFromISR(0);

                #[cfg(all(
                    not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6)),
                    not(esp_idf_version_major = "4")
                ))]
                _frxt_setup_switch();
            }
        }
    } else {
        unsafe {
            vPortYield();
        }
    }
}

#[inline(always)]
#[link_section = ".iram1.interrupt_task_current"]
/// Returns the handle of the currently running FreeRTOS task, or `None` when called from an ISR.
pub fn current() -> Option<TaskHandle_t> {
    if interrupt::active() {
        None
    } else {
        Some(unsafe { xTaskGetCurrentTaskHandle() })
    }
}

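/// Waits for a notification on the current task for at most `timeout` ticks.
///
/// Returns the accumulated notification bits (clearing them), or `None` if the wait timed out
/// or no bits were set.
///
/// A minimal sketch of pairing this with [`notify_and_yield`]; `consumer_handle` is a
/// hypothetical, previously-captured handle of the waiting task:
///
/// ```rust,ignore
/// use core::num::NonZeroU32;
///
/// // Consumer task: block indefinitely until any notification bits arrive
/// if let Some(bits) = esp_idf_hal::task::wait_notification(esp_idf_hal::delay::BLOCK) {
///     // react to `bits`
/// }
///
/// // Producer (another task or an ISR): set bit 0 on the consumer and yield if necessary
/// unsafe {
///     esp_idf_hal::task::notify_and_yield(consumer_handle, NonZeroU32::new(1).unwrap());
/// }
/// ```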
pub fn wait_notification(timeout: TickType_t) -> Option<NonZeroU32> {
    let mut notification = 0_u32;

    #[cfg(esp_idf_version = "4.3")]
    #[deprecated(
        note = "Using ESP-IDF 4.3 is untested, please upgrade to 4.4 or newer. Support will be removed in the next major release."
    )]
    let notified = unsafe { xTaskNotifyWait(0, u32::MAX, &mut notification, timeout) } != 0;

    #[cfg(not(esp_idf_version = "4.3"))]
    let notified =
        unsafe { xTaskGenericNotifyWait(0, 0, u32::MAX, &mut notification, timeout) } != 0;

    if notified {
        NonZeroU32::new(notification)
    } else {
        None
    }
}

/// # Safety
///
/// A valid FreeRTOS task handle must be passed to this function, and the
/// corresponding task must still be alive when this function is called.
pub unsafe fn notify_and_yield(task: TaskHandle_t, notification: NonZeroU32) -> bool {
    let (notified, higher_prio_task_woken) = notify(task, notification);

    if higher_prio_task_woken {
        do_yield();
    }

    notified
}

/// # Safety
///
/// A valid FreeRTOS task handle must be passed to this function, and the
/// corresponding task must still be alive when this function is called.
pub unsafe fn notify(task: TaskHandle_t, notification: NonZeroU32) -> (bool, bool) {
    let (notified, higher_prio_task_woken) = if interrupt::active() {
        let mut higher_prio_task_woken: BaseType_t = Default::default();

        #[cfg(esp_idf_version = "4.3")]
        #[deprecated(
            note = "Using ESP-IDF 4.3 is untested, please upgrade to 4.4 or newer. Support will be removed in the next major release."
        )]
        let notified = xTaskGenericNotifyFromISR(
            task,
            notification.into(),
            eNotifyAction_eSetBits,
            ptr::null_mut(),
            &mut higher_prio_task_woken,
        );

        #[cfg(not(esp_idf_version = "4.3"))]
        let notified = xTaskGenericNotifyFromISR(
            task,
            0,
            notification.into(),
            eNotifyAction_eSetBits,
            ptr::null_mut(),
            &mut higher_prio_task_woken,
        );

        (notified, higher_prio_task_woken)
    } else {
        #[cfg(esp_idf_version = "4.3")]
        #[deprecated(
            note = "Using ESP-IDF 4.3 is untested, please upgrade to 4.4 or newer. Support will be removed in the next major release."
        )]
        let notified = xTaskGenericNotify(
            task,
            notification.into(),
            eNotifyAction_eSetBits,
            ptr::null_mut(),
        );

        #[cfg(not(esp_idf_version = "4.3"))]
        let notified = xTaskGenericNotify(
            task,
            0,
            notification.into(),
            eNotifyAction_eSetBits,
            ptr::null_mut(),
        );

        (notified, 0)
    };

    (notified != 0, higher_prio_task_woken != 0)
}

/// Returns the handle of the FreeRTOS idle task for the given CPU core.
pub fn get_idle_task(core: crate::cpu::Core) -> TaskHandle_t {
    #[cfg(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6))]
    {
        if matches!(core, crate::cpu::Core::Core0) {
            unsafe { xTaskGetIdleTaskHandle() }
        } else {
            unreachable!()
        }
    }

    #[cfg(not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6)))]
    #[cfg(any(
        esp_idf_version_major = "4",
        esp_idf_version = "5.0",
        esp_idf_version = "5.1"
    ))]
    unsafe {
        xTaskGetIdleTaskHandleForCPU(core as u32)
    }

    #[cfg(not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6)))]
    #[cfg(not(any(
        esp_idf_version_major = "4",
        esp_idf_version = "5.0",
        esp_idf_version = "5.1"
    )))]
    unsafe {
        xTaskGetIdleTaskHandleForCore(core as i32)
    }
}

/// Executes the supplied future on the current thread, thus blocking it until the future becomes ready.
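///
/// # Example
///
/// A minimal sketch:
///
/// ```rust,ignore
/// let sum = esp_idf_hal::task::block_on(async { 1 + 2 });
/// assert_eq!(sum, 3);
/// ```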
#[cfg(feature = "alloc")]
pub fn block_on<F>(mut fut: F) -> F::Output
where
    F: Future,
{
    ::log::trace!("block_on(): started");

    let notification = notification::Notification::new();

    let mut fut = core::pin::pin!(fut);

    let waker = notification.notifier().into();

    let mut cx = Context::from_waker(&waker);

    let res = loop {
        match fut.as_mut().poll(&mut cx) {
            Poll::Ready(res) => break res,
            Poll::Pending => notification.wait_any(),
        }
    };

    ::log::trace!("block_on(): finished");

    res
}

/// Yield from the current task once, allowing other tasks to run.
///
/// This can be used to easily and quickly implement simple async primitives
/// without using wakers. The following snippet will wait for a condition to
/// hold, while still allowing other tasks to run concurrently (not monopolizing
/// the executor thread).
///
/// ```rust,ignore
/// while !some_condition() {
///     yield_now().await;
/// }
/// ```
///
/// The downside is this will spin in a busy loop, using 100% of the CPU, while
/// using wakers correctly would allow the CPU to sleep while waiting.
///
/// Internally, on the first poll the future wakes itself and returns `Poll::Pending`;
/// on the second poll it returns `Poll::Ready`.
pub fn yield_now() -> impl Future<Output = ()> {
    YieldNowFuture { yielded: false }
}

#[must_use = "futures do nothing unless you `.await` or poll them"]
struct YieldNowFuture {
    yielded: bool,
}

impl Future for YieldNowFuture {
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        if self.yielded {
            Poll::Ready(())
        } else {
            self.yielded = true;
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}

#[cfg(esp_idf_comp_pthread_enabled)]
pub mod thread {
    use core::ffi::CStr;

    use esp_idf_sys::*;

    use super::NO_AFFINITY;

    use crate::cpu::Core;

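    /// Configuration applied to `pthread`-backed threads (including Rust `std::thread`)
    /// spawned after [`ThreadSpawnConfiguration::set`] is called.
    ///
    /// A minimal sketch; note that the name must be a NUL-terminated byte string:
    ///
    /// ```rust,ignore
    /// ThreadSpawnConfiguration {
    ///     name: Some(b"worker\0"),
    ///     stack_size: 8192,
    ///     priority: 5,
    ///     ..Default::default()
    /// }
    /// .set()
    /// .unwrap();
    ///
    /// std::thread::spawn(|| {
    ///     // this thread runs with the configuration above
    /// });
    /// ```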
    #[derive(Debug)]
    pub struct ThreadSpawnConfiguration {
        pub name: Option<&'static [u8]>,
        pub stack_size: usize,
        pub priority: u8,
        pub inherit: bool,
        pub pin_to_core: Option<Core>,
    }

    impl ThreadSpawnConfiguration {
        pub fn get() -> Option<Self> {
            get_conf()
        }

        pub fn set(&self) -> Result<(), EspError> {
            set_conf(self)
        }
    }

    impl Default for ThreadSpawnConfiguration {
        fn default() -> Self {
            get_default_conf()
        }
    }

    impl From<&ThreadSpawnConfiguration> for esp_pthread_cfg_t {
        fn from(conf: &ThreadSpawnConfiguration) -> Self {
            Self {
                thread_name: conf
                    .name
                    .map(|name| name.as_ptr() as _)
                    .unwrap_or(core::ptr::null()),
                stack_size: conf.stack_size as _,
                prio: conf.priority as _,
                inherit_cfg: conf.inherit,
                pin_to_core: conf.pin_to_core.map(Into::into).unwrap_or(NO_AFFINITY as _),
            }
        }
    }

    impl From<esp_pthread_cfg_t> for ThreadSpawnConfiguration {
        fn from(conf: esp_pthread_cfg_t) -> Self {
            Self {
                name: if conf.thread_name.is_null() {
                    None
                } else {
                    Some(unsafe {
                        core::slice::from_raw_parts(
                            conf.thread_name as _,
                            c_strlen(conf.thread_name.cast()) + 1,
                        )
                    })
                },
                stack_size: conf.stack_size as _,
                priority: conf.prio as _,
                inherit: conf.inherit_cfg,
                pin_to_core: if conf.pin_to_core == NO_AFFINITY as _ {
                    None
                } else {
                    Some(conf.pin_to_core.into())
                },
            }
        }
    }

    fn get_default_conf() -> ThreadSpawnConfiguration {
        unsafe { esp_pthread_get_default_config() }.into()
    }

    fn get_conf() -> Option<ThreadSpawnConfiguration> {
        let mut conf: esp_pthread_cfg_t = Default::default();

        let res = unsafe { esp_pthread_get_cfg(&mut conf as _) };

        if res == ESP_ERR_NOT_FOUND {
            None
        } else {
            Some(conf.into())
        }
    }

    fn set_conf(conf: &ThreadSpawnConfiguration) -> Result<(), EspError> {
        if let Some(name) = conf.name {
            // The name must be a valid, NUL-terminated C string
            CStr::from_bytes_with_nul(name).expect("Missing NUL byte in the provided thread name");
        }

        if conf.priority < 1 || conf.priority as u32 >= configMAX_PRIORITIES {
            panic!(
                "Thread priority {} is out of range; it must be between 1 and {}",
                conf.priority,
                configMAX_PRIORITIES - 1
            );
        }

        esp!(unsafe { esp_pthread_set_cfg(&conf.into()) })?;

        Ok(())
    }

    fn c_strlen(c_str: *const u8) -> usize {
        let mut offset = 0;

        loop {
            // SAFETY: the caller passes a pointer to a valid, NUL-terminated C string
            if unsafe { *c_str.add(offset) } == 0 {
                return offset;
            }

            offset += 1;
        }
    }
}

/// A task-level critical section, backed by a lazily-created FreeRTOS recursive mutex.
///
/// Unlike `interrupt::free`, this does not disable interrupts; it only blocks other tasks
/// contending for the same critical section, and must not be used from ISR context.
pub struct CriticalSection(Cell<Option<NonNull<QueueDefinition>>>, AtomicBool);

// Not available in the esp-idf-sys bindings
const QUEUE_TYPE_RECURSIVE_MUTEX: u8 = 4;

#[inline(always)]
#[link_section = ".iram1.cs_enter"]
fn enter(cs: &CriticalSection) {
    if !cs.1.load(Ordering::SeqCst) {
        interrupt::free(|| {
            if !cs.1.load(Ordering::SeqCst) {
                let ptr = unsafe { xQueueCreateMutex(QUEUE_TYPE_RECURSIVE_MUTEX) };
                cs.0.set(NonNull::new(ptr));
                cs.1.store(true, Ordering::SeqCst);
            }
        });
    }

    let res =
        unsafe { xQueueTakeMutexRecursive(cs.0.get().unwrap().as_ptr(), crate::delay::BLOCK) } != 0;

    if !res {
        unreachable!();
    }
}

#[inline(always)]
#[link_section = ".iram1.cs_exit"]
fn exit(cs: &CriticalSection) {
    if !cs.1.load(Ordering::SeqCst) {
        panic!("Called exit() without matching enter()");
    }

    let res = unsafe { xQueueGiveMutexRecursive(cs.0.get().unwrap().as_ptr()) } != 0;

    if !res {
        unreachable!();
    }
}

impl CriticalSection {
    /// Constructs a new `CriticalSection` instance
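    ///
    /// A minimal usage sketch; the underlying mutex is created lazily on the first `enter`:
    ///
    /// ```rust,ignore
    /// static CS: CriticalSection = CriticalSection::new();
    ///
    /// {
    ///     let _guard = CS.enter();
    ///     // ... code here runs with the recursive mutex held
    /// } // the mutex is released when `_guard` is dropped
    /// ```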
    #[inline(always)]
    #[link_section = ".iram1.cs_new"]
    pub const fn new() -> Self {
        Self(Cell::new(None), AtomicBool::new(false))
    }

    #[inline(always)]
    #[link_section = ".iram1.cs_enter"]
    pub fn enter(&self) -> CriticalSectionGuard {
        enter(self);

        CriticalSectionGuard(self)
    }
}

impl Drop for CriticalSection {
    fn drop(&mut self) {
        if self.1.load(Ordering::SeqCst) {
            unsafe {
                vQueueDelete(self.0.get().unwrap().as_ptr());
            }
        }
    }
}

impl Default for CriticalSection {
    #[inline(always)]
    #[link_section = ".iram1.cs_default"]
    fn default() -> Self {
        Self::new()
    }
}

unsafe impl Send for CriticalSection {}
unsafe impl Sync for CriticalSection {}

pub struct CriticalSectionGuard<'a>(&'a CriticalSection);

impl<'a> Drop for CriticalSectionGuard<'a> {
    #[inline(always)]
    #[link_section = ".iram1.csg_drop"]
    fn drop(&mut self) {
        exit(self.0);
    }
}

#[cfg(any(
    all(
        not(any(esp_idf_version_major = "4", esp_idf_version = "5.0")),
        esp_idf_esp_task_wdt_en
    ),
    any(esp_idf_version_major = "4", esp_idf_version = "5.0")
))]
pub mod watchdog {
    //! ## Example
    //!
    //! ```rust, ignore
    //! # fn main() -> Result<()> {
    //! let peripherals = Peripherals::take().unwrap();
    //!
    //! let config = TWDTConfig {
    //!     duration: Duration::from_secs(2),
    //!     panic_on_trigger: true,
    //!     subscribed_idle_tasks: enum_set!(Core::Core0)
    //! };
    //! let mut driver = esp_idf_hal::task::watchdog::TWDTDriver::new(
    //!     peripherals.twdt,
    //!     &config,
    //! )?;
    //!
    //! let mut watchdog = driver.watch_current_task()?;
    //!
    //! loop {
    //!     watchdog.feed();
    //!     unsafe { vTaskDelay(1) };
    //! }
    //! # }
    //! ```

    use core::{
        marker::PhantomData,
        sync::atomic::{AtomicUsize, Ordering},
    };

    use esp_idf_sys::*;

    use crate::peripheral::Peripheral;

    pub type TWDTConfig = config::Config;

    pub mod config {

        #[cfg(not(esp_idf_version_major = "4"))]
        use esp_idf_sys::*;

        #[derive(Clone)]
        pub struct Config {
            pub duration: core::time::Duration,
            pub panic_on_trigger: bool,
            pub subscribed_idle_tasks: enumset::EnumSet<crate::cpu::Core>,
        }

        impl Config {
            // Could be const if enumset operations are const
            pub fn new() -> Self {
                #[cfg(esp_idf_esp_task_wdt)]
                let duration = core::time::Duration::from_secs(
                    esp_idf_sys::CONFIG_ESP_TASK_WDT_TIMEOUT_S as u64,
                );
                #[cfg(not(esp_idf_esp_task_wdt))]
                let duration = core::time::Duration::from_secs(5);
                Self {
                    duration,
                    panic_on_trigger: cfg!(esp_idf_esp_task_wdt_panic),
                    subscribed_idle_tasks: {
                        let mut subscribed_idle_tasks = enumset::EnumSet::empty();
                        if cfg!(esp_idf_esp_task_wdt_check_idle_task_cpu0) {
                            subscribed_idle_tasks |= crate::cpu::Core::Core0;
                        }
                        #[cfg(any(esp32, esp32s3))]
                        if cfg!(esp_idf_esp_task_wdt_check_idle_task_cpu1) {
                            subscribed_idle_tasks |= crate::cpu::Core::Core1;
                        }
                        subscribed_idle_tasks
                    },
                }
            }
        }

        impl Default for Config {
            fn default() -> Self {
                Self::new()
            }
        }

        #[cfg(not(esp_idf_version_major = "4"))]
        impl From<&Config> for esp_task_wdt_config_t {
            fn from(config: &Config) -> Self {
                esp_task_wdt_config_t {
                    timeout_ms: config.duration.as_millis() as u32,
                    trigger_panic: config.panic_on_trigger,
                    idle_core_mask: config.subscribed_idle_tasks.as_u32(),
                }
            }
        }
    }

    pub struct TWDTDriver<'d> {
        init_by_idf: bool,
        _marker: PhantomData<&'d mut ()>,
    }

    static TWDT_DRIVER_REF_COUNT: AtomicUsize = AtomicUsize::new(0);

    impl<'d> TWDTDriver<'d> {
        pub fn new(
            _twdt: impl Peripheral<P = TWDT> + 'd,
            config: &config::Config,
        ) -> Result<Self, EspError> {
            TWDT_DRIVER_REF_COUNT.fetch_add(1, Ordering::SeqCst);
            let init_by_idf = Self::watchdog_is_init_by_idf();

            #[cfg(not(esp_idf_version_major = "4"))]
            if !init_by_idf {
                esp!(unsafe { esp_task_wdt_init(&config.into() as *const esp_task_wdt_config_t) })?;
            } else {
                esp!(unsafe {
                    esp_task_wdt_reconfigure(&config.into() as *const esp_task_wdt_config_t)
                })?;
            }

            #[cfg(esp_idf_version_major = "4")]
            esp!(unsafe {
                esp_task_wdt_init(config.duration.as_secs() as u32, config.panic_on_trigger)
            })?;

            #[cfg(esp_idf_version_major = "4")]
            if let Err(e) = Self::subscribe_idle_tasks(config.subscribed_idle_tasks) {
                // An "already subscribed" error can occur here and is fine (we don't check beforehand whether the tasks are already subscribed)
                if e.code() != ESP_ERR_INVALID_ARG {
                    return Err(e);
                }
            }

            Ok(Self {
                init_by_idf,
                _marker: Default::default(),
            })
        }

        pub fn watch_current_task(&mut self) -> Result<WatchdogSubscription<'_>, EspError> {
            esp!(unsafe { esp_task_wdt_add(core::ptr::null_mut()) })?;
            Ok(WatchdogSubscription::new())
        }

        #[cfg(esp_idf_version_major = "4")]
        fn subscribe_idle_tasks(cores: enumset::EnumSet<crate::cpu::Core>) -> Result<(), EspError> {
            for core in cores {
                let task = super::get_idle_task(core);
                esp!(unsafe { esp_task_wdt_add(task) })?;
            }

            Ok(())
        }

        #[cfg(esp_idf_version_major = "4")]
        fn unsubscribe_idle_tasks() -> Result<(), EspError> {
            for core in enumset::EnumSet::<crate::cpu::Core>::all() {
                let task = super::get_idle_task(core);
                esp!(unsafe { esp_task_wdt_delete(task) })?;
            }

            Ok(())
        }

        fn watchdog_is_init_by_idf() -> bool {
            if cfg!(not(any(
                esp_idf_version_major = "4",
                esp_idf_version = "5.0"
            ))) {
                cfg!(esp_idf_esp_task_wdt_init)
            } else {
                !matches!(
                    unsafe { esp_task_wdt_status(core::ptr::null_mut()) },
                    ESP_ERR_INVALID_STATE
                )
            }
        }

        fn deinit(&self) -> Result<(), EspError> {
            if !self.init_by_idf {
                #[cfg(esp_idf_version_major = "4")]
                if let Err(e) = Self::unsubscribe_idle_tasks() {
                    // A "not subscribed" error can occur here and is fine (we don't check beforehand whether the tasks are subscribed)
                    if e.code() != ESP_ERR_INVALID_ARG {
                        return Err(e);
                    }
                }
                esp!(unsafe { esp_task_wdt_deinit() })?;
            }

            Ok(())
        }
    }

    impl Clone for TWDTDriver<'_> {
        fn clone(&self) -> Self {
            TWDT_DRIVER_REF_COUNT.fetch_add(1, Ordering::SeqCst);
            Self {
                init_by_idf: self.init_by_idf,
                _marker: Default::default(),
            }
        }
    }

    impl Drop for TWDTDriver<'_> {
        fn drop(&mut self) {
            let refcnt = TWDT_DRIVER_REF_COUNT.fetch_sub(1, Ordering::SeqCst);
            match refcnt {
                1 => self.deinit().unwrap(),
                r if r < 1 => unreachable!(), // Bug, should never happen
                _ => (),
            }
        }
    }

    unsafe impl Send for TWDTDriver<'_> {}

    pub struct WatchdogSubscription<'s>(PhantomData<&'s mut ()>);

    impl WatchdogSubscription<'_> {
        fn new() -> Self {
            Self(Default::default())
        }

        pub fn feed(&mut self) -> Result<(), EspError> {
            esp!(unsafe { esp_task_wdt_reset() })
        }
    }

    impl embedded_hal_0_2::watchdog::Watchdog for WatchdogSubscription<'_> {
        fn feed(&mut self) {
            Self::feed(self).unwrap()
        }
    }

    impl Drop for WatchdogSubscription<'_> {
        fn drop(&mut self) {
            esp!(unsafe { esp_task_wdt_delete(core::ptr::null_mut()) }).unwrap();
        }
    }

    crate::impl_peripheral!(TWDT);
}

#[cfg(feature = "critical-section")]
pub mod critical_section {
    static CS: super::CriticalSection = super::CriticalSection::new();

    pub struct EspCriticalSection {}

    unsafe impl critical_section::Impl for EspCriticalSection {
        unsafe fn acquire() {
            super::enter(&CS);
        }

        unsafe fn release(_token: ()) {
            super::exit(&CS);
        }
    }

    pub type LinkWorkaround = [*mut (); 2];

    static mut __INTERNAL_REFERENCE: LinkWorkaround = [
        _critical_section_1_0_acquire as *mut _,
        _critical_section_1_0_release as *mut _,
    ];

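    /// Returns the `critical-section` acquire/release shims.
    ///
    /// A minimal sketch, on the assumption that calling this once (e.g. at the start of
    /// `main`) is enough to keep the implementation from being stripped by the linker:
    ///
    /// ```rust,ignore
    /// fn main() {
    ///     esp_idf_hal::task::critical_section::link();
    ///     // ... rest of the application
    /// }
    /// ```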
    pub fn link() -> LinkWorkaround {
        unsafe { __INTERNAL_REFERENCE }
    }

    critical_section::set_impl!(EspCriticalSection);
}

pub mod embassy_sync {
    use embassy_sync::blocking_mutex::raw::RawMutex;

    /// A mutex that allows borrowing data across executors but NOT across interrupts.
    ///
    /// # Safety
    ///
    /// This mutex is safe to share between different executors.
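    ///
    /// # Example
    ///
    /// A minimal sketch of sharing a counter behind this raw mutex:
    ///
    /// ```rust,ignore
    /// use core::cell::RefCell;
    /// use embassy_sync::blocking_mutex::Mutex;
    ///
    /// static COUNTER: Mutex<EspRawMutex, RefCell<u32>> = Mutex::new(RefCell::new(0));
    ///
    /// fn increment() {
    ///     COUNTER.lock(|c| *c.borrow_mut() += 1);
    /// }
    /// ```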
    pub struct EspRawMutex(super::CriticalSection);

    unsafe impl Send for EspRawMutex {}
    unsafe impl Sync for EspRawMutex {}

    impl EspRawMutex {
        /// Create a new `EspRawMutex`.
        pub const fn new() -> Self {
            Self(super::CriticalSection::new())
        }
    }

    unsafe impl RawMutex for EspRawMutex {
        #[allow(clippy::declare_interior_mutable_const)]
        const INIT: Self = Self::new();

        fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
            let _guard = self.0.enter();

            f()
        }
    }
}

#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
pub mod notification {
    use core::marker::PhantomData;
    use core::num::NonZeroU32;
    use core::sync::atomic::{AtomicPtr, Ordering};

    extern crate alloc;
    use alloc::sync::Arc;
    use alloc::task::Wake;

    use esp_idf_sys::TickType_t;

    use crate::task;

    #[cfg(esp_idf_version_major = "4")]
    type Task = core::ffi::c_void;

    #[cfg(not(esp_idf_version_major = "4"))]
    type Task = esp_idf_sys::tskTaskControlBlock;

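    /// A notification primitive bound to the FreeRTOS task that creates it.
    ///
    /// A minimal sketch: wait on the creating task, and wake it from elsewhere
    /// (e.g. a callback) via a cloned [`Notifier`]:
    ///
    /// ```rust,ignore
    /// use core::num::NonZeroU32;
    ///
    /// let notification = Notification::new();
    /// let notifier = notification.notifier();
    ///
    /// // Hand `notifier` to a callback or ISR, which eventually calls:
    /// // unsafe { notifier.notify_and_yield(NonZeroU32::new(1).unwrap()) };
    ///
    /// // Block the current task until any notification bits arrive
    /// notification.wait_any();
    /// ```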
    pub struct Notification(Arc<Notifier>, PhantomData<*const ()>);

    impl Notification {
        pub fn new() -> Self {
            Self(
                Arc::new(Notifier(AtomicPtr::new(task::current().unwrap()))),
                PhantomData,
            )
        }

        pub fn notifier(&self) -> Arc<Notifier> {
            self.0.clone()
        }

        pub fn wait_any(&self) {
            loop {
                if task::wait_notification(crate::delay::BLOCK).is_some() {
                    break;
                }
            }
        }

        pub fn wait(&self, timeout: TickType_t) -> Option<NonZeroU32> {
            task::wait_notification(timeout)
        }
    }

    impl Default for Notification {
        fn default() -> Self {
            Self::new()
        }
    }

    pub struct Notifier(AtomicPtr<Task>);

    impl Notifier {
        /// # Safety
        ///
        /// This method is unsafe because it is possible to call `core::mem::forget` on the `Notification` instance
        /// that produced this notifier.
        ///
        /// If that happens, the `Drop` dtor of `Notification` will NOT be called, which - in turn - means that the
        /// `Arc` holding the task reference will stick around even when the task where the `Notification` instance
        /// was created no longer exists. The method would then try to notify a task which no longer exists,
        /// which is undefined behavior and, specifically, memory corruption.
        pub unsafe fn notify(&self, notification: NonZeroU32) -> (bool, bool) {
            let freertos_task = self.0.load(Ordering::SeqCst);

            if !freertos_task.is_null() {
                return unsafe { task::notify(freertos_task, notification) };
            }

            (false, false)
        }

        /// # Safety
        ///
        /// This method is unsafe because it is possible to call `core::mem::forget` on the `Notification` instance
        /// that produced this notifier.
        ///
        /// If that happens, the `Drop` dtor of `Notification` will NOT be called, which - in turn - means that the
        /// `Arc` holding the task reference will stick around even when the task where the `Notification` instance
        /// was created no longer exists. The method would then try to notify a task which no longer exists,
        /// which is undefined behavior and, specifically, memory corruption.
        pub unsafe fn notify_and_yield(&self, notification: NonZeroU32) -> bool {
            let freertos_task = self.0.load(Ordering::SeqCst);

            if !freertos_task.is_null() {
                unsafe { task::notify_and_yield(freertos_task, notification) }
            } else {
                false
            }
        }
    }

    impl Wake for Notifier {
        fn wake(self: Arc<Self>) {
            unsafe {
                self.notify_and_yield(NonZeroU32::new(1).unwrap());
            }
        }
    }
}

pub mod queue {
    use core::{
        marker::PhantomData,
        mem::{size_of, MaybeUninit},
    };

    use esp_idf_sys::{EspError, TickType_t, ESP_FAIL};

    use crate::sys;

    /// Thin wrapper on top of the FreeRTOS queue.
    ///
    /// This may be preferable over a Rust channel
    /// in cases where an ISR needs to send or receive
    /// data as it is safe to use in ISR contexts.
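    ///
    /// # Example
    ///
    /// A minimal sketch of a producer/consumer exchange:
    ///
    /// ```rust,ignore
    /// // A queue with room for 10 `u32` items
    /// let queue: Queue<u32> = Queue::new(10);
    ///
    /// // Producer: block for up to 100 ticks if the queue is full
    /// queue.send_back(42, 100).expect("queue full");
    ///
    /// // Consumer: block for up to 100 ticks waiting for an item
    /// if let Some((item, _higher_prio_task_awoken)) = queue.recv_front(100) {
    ///     // process `item`
    /// }
    /// ```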
    pub struct Queue<T> {
        ptr: sys::QueueHandle_t,
        is_owned: bool,
        _marker: PhantomData<T>,
    }

    unsafe impl<T> Send for Queue<T> where T: Send + Sync {}
    unsafe impl<T> Sync for Queue<T> where T: Send + Sync {}

    impl<T> Queue<T>
    where
        // ensures the contained elements are not `Drop`
        // might be able to lift restriction in the future
        T: Copy,
    {
        /// Allocate a new queue on the heap.
        pub fn new(size: usize) -> Self {
            Queue {
                ptr: unsafe { sys::xQueueGenericCreate(size as u32, size_of::<T>() as u32, 0) },
                is_owned: true,
                _marker: PhantomData,
            }
        }

        /// Create a new queue which is not deleted on `Drop`, but owned by somebody else.
        ///
        /// # Safety
        ///
        /// Care must be taken that the queue is valid for the constructed
        /// lifetime.
        pub unsafe fn new_borrowed(ptr: sys::QueueHandle_t) -> Self {
            assert!(!ptr.is_null());

            Queue {
                ptr,
                is_owned: false,
                _marker: PhantomData,
            }
        }

        /// Retrieves the underlying FreeRTOS handle.
        #[inline]
        #[link_section = "iram1.queue_as_raw"]
        pub fn as_raw(&self) -> sys::QueueHandle_t {
            self.ptr
        }

        /// Copy item to back of queue, blocking for `timeout` ticks if full.
        ///
        /// # ISR safety
        ///
        /// This function is safe to call in ISR contexts.
        ///
        /// # Parameters
        ///
        /// * `item` the item to push onto the back of the queue
        /// * `timeout` specifies how long to block. Ignored in ISR context.
        ///
        /// # Returns
        ///
        /// Returns an error if the queue is full.
        /// If this function is executed in an ISR context, it returns `true` when a higher-priority
        /// task was awoken; the ISR should then call [`crate::task::do_yield`].
        /// In non-ISR contexts, the function always returns `false`.
        #[inline]
        #[link_section = "iram1.queue_send_back"]
        pub fn send_back(&self, item: T, timeout: TickType_t) -> Result<bool, EspError> {
            self.send_generic(item, timeout, 0)
        }

        /// Copy item to front of queue, blocking for `timeout` ticks if full.
        /// This can be used for high-priority messages which should be processed
        /// sooner.
        ///
        /// # ISR safety
        ///
        /// This function is safe to call in ISR contexts.
        ///
        /// # Parameters
        ///
        /// * `item` the item to push to front of the queue
        /// * `timeout` specifies how long to block. Ignored in ISR context.
        ///
        /// # Returns
        ///
        /// Returns an error if the queue is full.
        /// If this function is executed in an ISR context, it returns `true` when a higher-priority
        /// task was awoken; the ISR should then call [`crate::task::do_yield`].
        /// In non-ISR contexts, the function always returns `false`.
        #[inline]
        #[link_section = "iram1.queue_send_front"]
        pub fn send_front(&self, item: T, timeout: TickType_t) -> Result<bool, EspError> {
            self.send_generic(item, timeout, 1)
        }

        /// Copy item to queue, blocking for `timeout` ticks if full.
        ///
        /// # ISR safety
        ///
        /// This function is safe to call in ISR contexts.
        ///
        /// # Parameters
        ///
        /// * `item` the item to push to the queue
        /// * `timeout` specifies how long to block. Ignored in ISR context.
        /// * `copy_position` 0 to push onto back, 1 to push to front
        ///
        /// # Returns
        ///
        /// Returns an error if the queue is full.
        /// If this function is executed in an ISR context, it returns `true` when a higher-priority
        /// task was awoken; the ISR should then call [`crate::task::do_yield`].
        /// In non-ISR contexts, the function always returns `false`.
        #[inline]
        #[link_section = "iram1.queue_send_generic"]
        fn send_generic(
            &self,
            item: T,
            timeout: TickType_t,
            copy_position: i32,
        ) -> Result<bool, EspError> {
            let mut hp_task_awoken: i32 = 0;
            let success = unsafe {
                if crate::interrupt::active() {
                    sys::xQueueGenericSendFromISR(
                        self.ptr,
                        &item as *const T as *const _,
                        &mut hp_task_awoken,
                        copy_position,
                    )
                } else {
                    sys::xQueueGenericSend(
                        self.ptr,
                        &item as *const T as *const _,
                        timeout,
                        copy_position,
                    )
                }
            };
            let success = success == 1;
            let hp_task_awoken = hp_task_awoken == 1;

            match success {
                true => Ok(hp_task_awoken),
                false => Err(EspError::from_infallible::<ESP_FAIL>()),
            }
        }

        /// Receive a message from the queue and remove it.
        ///
        /// # ISR safety
        ///
        /// This function is safe to use in ISR contexts
        ///
        /// # Parameters
        ///
        /// * `timeout` specifies how long to block. Ignored in ISR contexts.
        ///
        /// # Returns
        ///
        /// * `None` if no message could be received in time
        /// * `Some((message, higher_priority_task_awoken))` otherwise
        ///
        /// The boolean is only meaningful in ISR contexts and indicates whether a higher-priority
        /// task was awoken; the ISR should then call [`crate::task::do_yield`].
        /// In non-ISR contexts, the boolean is always `false`.
        #[inline]
        #[link_section = "iram1.queue_recv_front"]
        pub fn recv_front(&self, timeout: TickType_t) -> Option<(T, bool)> {
            let mut buf = MaybeUninit::uninit();
            let mut hp_task_awoken: i32 = 0;

            unsafe {
                let success = if crate::interrupt::active() {
                    sys::xQueueReceiveFromISR(
                        self.ptr,
                        buf.as_mut_ptr() as *mut _,
                        &mut hp_task_awoken,
                    )
                } else {
                    sys::xQueueReceive(self.ptr, buf.as_mut_ptr() as *mut _, timeout)
                };
                if success == 1 {
                    Some((buf.assume_init(), hp_task_awoken == 1))
                } else {
                    None
                }
            }
        }

        /// Copy the first message from the queue without removing it.
        ///
        /// # ISR safety
        ///
        /// This function is safe to use in ISR contexts
        ///
        /// # Parameters
        ///
        /// * `timeout` specifies how long to block. Ignored in ISR contexts.
        ///
        /// # Returns
        ///
        /// * `None` if no message could be received in time
        /// * `Some(message)` otherwise
        ///
        /// This function does not return a boolean to indicate if
        /// a higher priority task was awoken since we don't free
        /// up space in the queue and thus cannot unblock anyone.
        #[inline]
        #[link_section = "iram1.queue_peek_front"]
        pub fn peek_front(&self, timeout: TickType_t) -> Option<T> {
            let mut buf = MaybeUninit::uninit();

            unsafe {
                let success = if crate::interrupt::active() {
                    sys::xQueuePeekFromISR(self.ptr, buf.as_mut_ptr() as *mut _)
                } else {
                    sys::xQueuePeek(self.ptr, buf.as_mut_ptr() as *mut _, timeout)
                };
                if success == 1 {
                    Some(buf.assume_init())
                } else {
                    None
                }
            }
        }
    }

    impl<T> Drop for Queue<T> {
        fn drop(&mut self) {
            if self.is_owned {
                unsafe { sys::vQueueDelete(self.ptr) }
            }
        }
    }
}

pub mod asynch {
    use core::future::Future;
    use core::num::NonZeroU32;
    use core::sync::atomic::{AtomicU32, Ordering};
    use core::task::{Context, Poll, Waker};

    use atomic_waker::AtomicWaker;

    /// Single-slot lock-free signaling primitive supporting signaling with a `u32` bit-set.
    ///
    /// It is useful for sending data between tasks when the receiver only cares about
    /// the latest data, and therefore it's fine to "lose" messages. This is often the case for "state"
    /// updates.
    ///
    /// The sending part of the primitive is non-blocking, so it is also useful for notifying asynchronous tasks
    /// from contexts where blocking or async wait is not possible.
    ///
    /// Similar in spirit to the ESP-IDF FreeRTOS task notifications in that it is light-weight and operates on bit-sets,
    /// but for synchronization between an asynchronous task, and another one, which might be blocking or asynchronous.
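    ///
    /// # Example
    ///
    /// A minimal sketch: a static notification signaled from anywhere (e.g. a callback)
    /// and awaited by an async task:
    ///
    /// ```rust,ignore
    /// static EVENT: Notification = Notification::new();
    ///
    /// // Signaling side: set bit 0 and wake the registered waker, if any
    /// EVENT.notify_lsb();
    ///
    /// // Async side: completes once any bits are set, returning and clearing them
    /// async fn wait_for_event() {
    ///     let bits = EVENT.wait().await;
    ///     // react to `bits`
    /// }
    /// ```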
    pub struct Notification {
        waker: AtomicWaker,
        notified: AtomicU32,
    }

    impl Notification {
        /// Creates a new `Notification`.
        pub const fn new() -> Self {
            Self {
                waker: AtomicWaker::new(),
                notified: AtomicU32::new(0),
            }
        }

        /// Marks the least significant bit (bit 0) in this `Notification` as notified.
        /// Returns `true` if there was a registered waker which got awoken.
        pub fn notify_lsb(&self) -> bool {
            self.notify(NonZeroU32::new(1).unwrap())
        }

        /// Marks the supplied bits in this `Notification` as notified.
        /// Returns `true` if there was a registered waker which got awoken.
        pub fn notify(&self, bits: NonZeroU32) -> bool {
            if let Some(waker) = self.notify_waker(bits) {
                waker.wake();

                true
            } else {
                false
            }
        }

        /// A utility to help in implementing custom `wait` logic:
        /// Adds the supplied bits as notified in the notification instance and returns the registered waker (if any).
        pub fn notify_waker(&self, bits: NonZeroU32) -> Option<Waker> {
            self.notified.fetch_or(bits.into(), Ordering::SeqCst);

            self.waker.take()
        }

        /// Clears the state of this notification by removing any registered waker and setting all bits to 0.
        pub fn reset(&self) {
            self.waker.take();
            self.notified.store(0, Ordering::SeqCst);
        }

        /// Future that completes when this `Notification` has been notified.
        #[allow(unused)]
        pub fn wait(&self) -> impl Future<Output = NonZeroU32> + '_ {
            core::future::poll_fn(move |cx| self.poll_wait(cx))
        }

        /// Non-blocking method to check whether this notification has been notified.
        pub fn poll_wait(&self, cx: &Context<'_>) -> Poll<NonZeroU32> {
            self.waker.register(cx.waker());

            let bits = self.notified.swap(0, Ordering::SeqCst);

            if let Some(bits) = NonZeroU32::new(bits) {
                Poll::Ready(bits)
            } else {
                Poll::Pending
            }
        }
    }

    impl Drop for Notification {
        fn drop(&mut self) {
            self.reset();
        }
    }
}