//! esp_idf_hal/rmt/tx_channel.rs — RMT TX channel driver.
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ptr;
use core::sync::atomic::{AtomicUsize, Ordering};
use core::time::Duration;

use alloc::boxed::Box;
use esp_idf_sys::*;

use crate::gpio::OutputPin;
use crate::interrupt::asynch::HalIsrNotification;
use crate::rmt::config::{Loop, TransmitConfig, TxChannelConfig};
use crate::rmt::encoder::{into_raw, Encoder, RawEncoder};
use crate::rmt::tx_queue::TxQueue;
use crate::rmt::TxDoneEventData;
use crate::rmt::{assert_not_in_isr, EncoderBuffer, RmtChannel};
18
19struct UserData<'d> {
20    callback: Option<Box<dyn FnMut(TxDoneEventData) + Send + 'd>>,
21    queue_size: AtomicUsize,
22    queue_has_progressed: HalIsrNotification,
23}
24
25pub struct TxChannelDriver<'d> {
26    // SAFETY: The unsafe code relies on this field to accurately reflect the channel state.
27    //         It relies on the fact that in disabled state, the ISR handler will not be called.
28    is_enabled: bool,
29    handle: rmt_channel_handle_t,
30    on_transmit_data: Box<UserData<'d>>,
31    _p: PhantomData<&'d mut ()>,
32}
33
34impl<'d> TxChannelDriver<'d> {
35    const TX_EVENT_CALLBACKS: rmt_tx_event_callbacks_t = rmt_tx_event_callbacks_t {
36        on_trans_done: Some(Self::handle_isr),
37    };
38    const TX_EVENT_CALLBACKS_DISABLE: rmt_tx_event_callbacks_t = rmt_tx_event_callbacks_t {
39        on_trans_done: None,
40    };
41
42    /// Creates a new RMT TX channel.
43    ///
44    /// # Note
45    ///
46    /// When multiple RMT channels are allocated at the same time,
47    /// the group’s prescale is determined based on the resolution of
48    /// the first channel. The driver then selects the appropriate prescale
49    /// from low to high. To avoid prescale conflicts when allocating multiple
50    /// channels, allocate channels in order of their target resolution,
51    /// either from highest to lowest or lowest to highest.
52    ///
53    /// # Panics
54    ///
55    /// This function will panic if called from an ISR context.
56    pub fn new(pin: impl OutputPin + 'd, config: &TxChannelConfig) -> Result<Self, EspError> {
57        assert_not_in_isr();
58
59        let sys_config: rmt_tx_channel_config_t = rmt_tx_channel_config_t {
60            clk_src: config.clock_source.into(),
61            resolution_hz: config.resolution.into(),
62            mem_block_symbols: config.memory_access.symbols(),
63            trans_queue_depth: config.transaction_queue_depth,
64            #[cfg(esp_idf_version_at_least_5_1_2)]
65            intr_priority: config.interrupt_priority,
66            flags: rmt_tx_channel_config_t__bindgen_ty_1 {
67                _bitfield_1: rmt_tx_channel_config_t__bindgen_ty_1::new_bitfield_1(
68                    config.invert_out as u32,
69                    config.memory_access.is_direct() as u32,
70                    #[cfg(not(esp_idf_version_at_least_6_0_0))]
71                    {
72                        config.io_loop_back as u32
73                    },
74                    #[cfg(not(esp_idf_version_at_least_6_0_0))]
75                    {
76                        config.io_od_mode as u32
77                    },
78                    #[cfg(esp_idf_version_at_least_5_4_0)]
79                    {
80                        config.allow_pd as u32
81                    },
82                    #[cfg(any(
83                        esp_idf_version_patch_at_least_5_4_3,
84                        esp_idf_version_at_least_5_5_2,
85                        esp_idf_version_at_least_6_0_0
86                    ))]
87                    {
88                        0
89                    },
90                ),
91                ..Default::default()
92            },
93            gpio_num: pin.pin() as _,
94        };
95        let mut handle: rmt_channel_handle_t = ptr::null_mut();
96        esp!(unsafe { rmt_new_tx_channel(&sys_config, &mut handle) })?;
97
98        #[cfg_attr(not(feature = "alloc"), allow(unused_mut))]
99        let mut this = Self {
100            is_enabled: false,
101            handle,
102            on_transmit_data: Box::new(UserData {
103                callback: None,
104                queue_size: AtomicUsize::new(0),
105                queue_has_progressed: HalIsrNotification::new(),
106            }),
107            _p: PhantomData,
108        };
109
110        // The callback is used to detect when a transmission is finished.
111        esp!(unsafe {
112            rmt_tx_register_event_callbacks(
113                handle,
114                &Self::TX_EVENT_CALLBACKS,
115                (&raw mut *this.on_transmit_data) as *mut core::ffi::c_void,
116            )
117        })?;
118
119        Ok(this)
120    }
121
122    /// Wait for all pending TX transactions to finish.
123    ///
124    /// If `timeout` is `None`, it will wait indefinitely. If `timeout` is `Some(duration)`,
125    /// it will wait for at most `duration`.
126    ///
127    /// # Note
128    ///
129    /// This function will block forever if the pending transaction can't
130    /// be finished within a limited time (e.g. an infinite loop transaction).
131    /// See also [`Self::disable`] for how to terminate a working channel.
132    ///
133    /// If the given `timeout` converted to milliseconds is larger than `i32::MAX`,
134    /// it will be treated as `None` (wait indefinitely).
135    ///
136    /// # Errors
137    ///
138    /// - `ESP_ERR_INVALID_ARG`: Flush transactions failed because of invalid argument
139    /// - `ESP_FAIL`: Flush transactions failed because of other error
140    ///
141    /// # Polling
142    ///
143    /// When polling this function (calling with a timeout duration of 0ms),
144    /// esp-idf will log flush timeout errors to the console.
145    /// This issue is tracked in <https://github.com/espressif/esp-idf/issues/17527>
146    /// and should be fixed in future esp-idf versions.
147    pub fn wait_all_done(&mut self, timeout: Option<Duration>) -> Result<(), EspError> {
148        esp!(unsafe {
149            rmt_tx_wait_all_done(
150                self.handle,
151                timeout.map_or(-1, |duration| duration.as_millis().try_into().unwrap_or(-1)),
152            )
153        })
154    }
155
156    /// Define the ISR handler for when a transmission is done.
157    ///
158    /// The callback will be called with the number of transmitted symbols, including one EOF symbol,
159    /// which is appended by the driver to mark the end of the transmission. For a loop transmission,
160    /// this value only counts for one round.
161    ///
162    /// There is only one callback possible, you can not subscribe multiple callbacks.
163    ///
164    /// # Panics
165    ///
166    /// This function will panic if called from an ISR context or while the channel is enabled.
167    ///
168    /// # ISR Safety
169    ///
170    /// Care should be taken not to call std, libc or FreeRTOS APIs (except for a few allowed ones)
171    /// in the callback passed to this function, as it is executed in an ISR context.
172    ///
173    /// You are not allowed to block, but you are allowed to call FreeRTOS APIs with the FromISR suffix.
174    pub fn subscribe(&mut self, callback: impl FnMut(TxDoneEventData) + Send + 'static) {
175        // SAFETY: because of 'static lifetime, it doesn't matter if mem::forget is called on the driver
176        unsafe {
177            self.subscribe_nonstatic(callback);
178        }
179    }
180
181    /// Subscribe a non-'static callback for when a transmission is done.
182    ///
183    /// # Safety
184    ///
185    /// You must not forget the channel driver (for example through [`mem::forget`]),
186    /// while the callback is still subscribed, otherwise this would lead to undefined behavior.
187    pub unsafe fn subscribe_nonstatic(
188        &mut self,
189        callback: impl FnMut(TxDoneEventData) + Send + 'd,
190    ) {
191        assert_not_in_isr();
192        if self.is_enabled() {
193            panic!("Can't subscribe while the channel is enabled");
194        }
195
196        self.on_transmit_data.callback = Some(Box::new(callback));
197    }
198
199    /// Remove the ISR handler for when a transmission is done.
200    ///
201    /// # Panics
202    ///
203    /// This function will panic if called from an ISR context or while the channel is enabled.
204    pub fn unsubscribe(&mut self) {
205        assert_not_in_isr();
206        if self.is_enabled() {
207            panic!("Can't unsubscribe while the channel is enabled");
208        }
209
210        self.on_transmit_data.callback = None;
211    }
212
213    /// Handles the ISR event for when a transmission is done.
214    unsafe extern "C" fn handle_isr(
215        _channel: rmt_channel_handle_t,
216        event_data: *const rmt_tx_done_event_data_t,
217        user_data: *mut core::ffi::c_void,
218    ) -> bool {
219        let event_data = TxDoneEventData::from(event_data.read());
220        let user_data = &mut *(user_data as *mut UserData<'d>);
221
222        user_data.queue_size.fetch_sub(1, Ordering::SeqCst);
223        if let Some(handler) = user_data.callback.as_mut() {
224            handler(event_data);
225        }
226
227        user_data.queue_has_progressed.notify_lsb()
228    }
229
230    /// Starts transmitting the signal using the specified encoder and config.
231    ///
232    /// # Safety
233    ///
234    /// This function is a thin wrapper around the `rmt_transmit` function, it assumes that
235    /// - the encoder (the returned pointer of [`RawEncoder::handle`]) is valid until the transmission
236    ///   is done, if not, it is guaranteed to crash
237    /// - the signal is valid until the transmission is done
238    /// - the encoder and signal are not modified during the transmission
239    ///
240    /// The caller must ensure that the encoder and signal **live long enough** and are **not moved**.
241    pub unsafe fn start_send<E: RawEncoder>(
242        &mut self,
243        encoder: &mut E,
244        signal: &[E::Item],
245        config: &TransmitConfig,
246    ) -> Result<(), EspError> {
247        if !self.is_enabled() {
248            self.enable()?;
249        }
250
251        let sys_config = rmt_transmit_config_t {
252            loop_count: match config.loop_count {
253                Loop::Count(value) => value as i32,
254                Loop::Endless => -1,
255                Loop::None => 0,
256            },
257            flags: rmt_transmit_config_t__bindgen_ty_1 {
258                _bitfield_1: rmt_transmit_config_t__bindgen_ty_1::new_bitfield_1(
259                    config.eot_level as u32,
260                    #[cfg(esp_idf_version_at_least_5_1_3)]
261                    {
262                        config.queue_non_blocking as u32
263                    },
264                ),
265                ..Default::default()
266            },
267        };
268
269        esp!(unsafe {
270            rmt_transmit(
271                self.handle(),
272                encoder.handle(),
273                signal.as_ptr() as *const core::ffi::c_void,
274                // size should be given in bytes:
275                mem::size_of_val::<[E::Item]>(signal),
276                &sys_config,
277            )
278        })?;
279
280        self.on_transmit_data
281            .queue_size
282            .fetch_add(1, Ordering::SeqCst);
283
284        Ok(())
285    }
286
287    /// Transmits the signals provided by the iterator using the specified encoder and config.
288    ///
289    /// This is a convenience function that will create a [`TxQueue`], push all signals from the iterator
290    /// to the queue, and then drop the queue, waiting for all transmissions to finish.
291    ///
292    /// # Non blocking behavior
293    ///
294    /// It is not recommended to use this function with [`TransmitConfig::queue_non_blocking`] set to true,
295    /// because it will drop the queue if it would block, resulting in it blocking until all pending transmissions
296    /// are done.
297    ///
298    /// Therefore, one should use [`TxChannelDriver::queue`] for a non-blocking use case.
299    pub fn send_iter<E: Encoder, S: AsRef<[E::Item]>>(
300        &mut self,
301        encoders: impl IntoIterator<Item = E>,
302        iter: impl Iterator<Item = S>,
303        config: &TransmitConfig,
304    ) -> Result<(), EspError>
305    where
306        E::Item: Clone,
307    {
308        let mut pending = TxQueue::new(
309            encoders
310                .into_iter()
311                .map(|encoder| EncoderBuffer::new(into_raw(encoder)))
312                .collect(),
313            self,
314        );
315
316        for signal in iter {
317            pending.push(signal.as_ref(), config)?;
318        }
319
320        // The remaining pending transmissions will be awaited in the drop of the queue.
321
322        Ok(())
323    }
324
325    /// Creates a new queue for transmitting multiple signals with the given encoders.
326    ///
327    /// For more information, see [`TxQueue`].
328    ///
329    /// # Panics
330    ///
331    /// If no encoders are provided.
332    #[must_use]
333    pub fn queue<E: Encoder>(
334        &mut self,
335        encoders: impl IntoIterator<Item = E>,
336    ) -> TxQueue<'_, 'd, E> {
337        TxQueue::new(
338            encoders
339                .into_iter()
340                .map(|encoder| EncoderBuffer::new(into_raw(encoder)))
341                .collect(),
342            self,
343        )
344    }
345
346    /// Asynchronously waits until the next pending transmission has finished.
347    ///
348    /// If there are no pending transmissions, this function will wait indefinitely.
349    pub async fn wait_for_progress(&self) {
350        self.on_transmit_data.queue_has_progressed.wait().await;
351    }
352
353    /// Returns the number of currently pending transmissions.
354    ///
355    /// This will be updated when a transmission is started or finished.
356    pub fn queue_size(&self) -> usize {
357        self.on_transmit_data.queue_size.load(Ordering::SeqCst)
358    }
359
360    /// Transmits the signal and waits for the transmission to finish.
361    ///
362    /// If the channel is not enabled yet, it will be enabled automatically.
363    ///
364    /// # Queue blocking behavior
365    ///
366    /// This function constructs a transaction descriptor then pushes to a queue. The transaction
367    /// will not start immediately if there's another one under processing. Based on the setting
368    /// of [`TransmitConfig::queue_non_blocking`], if there're too many transactions pending in the
369    /// queue, this function can block until it has free slot, otherwise just return quickly.
370    ///
371    /// # Errors
372    ///
373    /// - `ESP_ERR_INVALID_ARG`: Transmit failed because of invalid argument
374    /// - `ESP_ERR_NOT_SUPPORTED`: Some feature is not supported by hardware e.g. unsupported loop count
375    /// - `ESP_FAIL`: Because of other errors
376    pub fn send_and_wait<E: Encoder>(
377        &mut self,
378        encoder: E,
379        signal: &[E::Item],
380        config: &TransmitConfig,
381    ) -> Result<(), EspError>
382    where
383        E::Item: Clone,
384    {
385        self.send_iter([encoder], core::iter::once(signal), config)
386    }
387}
388
389// SAFETY: The C code doesn't seem to use any thread locals -> it should be safe to send the channel to another thread.
390unsafe impl<'d> Send for TxChannelDriver<'d> {}
391
392// SAFETY: All non-thread-safe methods require exclusive access to self
393// -> it is safe to send &TxChannelDriver to another thread.
394// -> it is safe to implement Sync
395unsafe impl<'d> Sync for TxChannelDriver<'d> {}
396
397impl<'d> RmtChannel for TxChannelDriver<'d> {
398    fn handle(&self) -> rmt_channel_handle_t {
399        self.handle
400    }
401
402    fn is_enabled(&self) -> bool {
403        self.is_enabled
404    }
405
406    unsafe fn set_internal_enabled(&mut self, is_enabled: bool) {
407        self.is_enabled = is_enabled;
408
409        // If the channel was disabled, all pending transmissions are cancelled
410        if !self.is_enabled {
411            self.on_transmit_data.queue_size.store(0, Ordering::SeqCst);
412            self.on_transmit_data.queue_has_progressed.reset();
413        }
414    }
415}
416
417impl<'d> Drop for TxChannelDriver<'d> {
418    fn drop(&mut self) {
419        // Deleting the channel might fail if it is not disabled first.
420        //
421        // The result is ignored here, because there is nothing we can do about it.
422        if self.is_enabled() {
423            let _res = self.disable();
424        }
425
426        // SAFETY: The disable will cancel all pending transmission -> the stored data can be freed
427
428        // Remove the isr handler:
429        let _res = unsafe {
430            rmt_tx_register_event_callbacks(
431                self.handle,
432                &Self::TX_EVENT_CALLBACKS_DISABLE,
433                ptr::null_mut(),
434            )
435        };
436
437        unsafe { rmt_del_channel(self.handle) };
438    }
439}
440
441impl<'d> fmt::Debug for TxChannelDriver<'d> {
442    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
443        f.debug_struct("TxChannelDriver")
444            .field("is_enabled", &self.is_enabled)
445            .field("handle", &self.handle)
446            .finish()
447    }
448}