// esp_idf_hal/rmt/rx_channel.rs
1use core::fmt;
2use core::marker::PhantomData;
3use core::mem;
4use core::ptr;
5
6use alloc::boxed::Box;
7use alloc::vec::Vec;
8
9use esp_idf_sys::*;
10
11use crate::delay::TickType;
12use crate::gpio::InputPin;
13use crate::interrupt::asynch::HalIsrNotification;
14use crate::rmt::config::{ReceiveConfig, RxChannelConfig};
15use crate::rmt::{assert_not_in_isr, RmtChannel, RxDoneEventData, Symbol};
16use crate::task::queue::Queue;
17
/// State shared between the driver task and the RMT "receive done" ISR.
///
/// A raw pointer to this struct (kept boxed inside [`RxChannelDriver`]) is
/// registered as the ISR callback user data, so the allocation must stay at a
/// stable address while the callback is registered.
struct UserData {
    // ISR -> task channel carrying receive-completion events.
    queue: Queue<RxDoneEventData>,
    // Optional user callback, invoked from ISR context on each receive-done event.
    callback: Option<Box<dyn FnMut(RxDoneEventData) + Send + 'static>>,
    // Wakes async waiters (see `receive_async`) from the ISR.
    notif: HalIsrNotification,
}
23
/// Driver for an RMT RX (receive) channel.
pub struct RxChannelDriver<'d> {
    // Raw ESP-IDF channel handle; released in `Drop` via `rmt_del_channel`.
    handle: rmt_channel_handle_t,
    // Whether the channel is currently enabled (mirrors the driver state).
    is_enabled: bool,
    // Boxed so the pointer handed to the ISR callback stays stable when
    // the driver struct itself is moved.
    user_data: Box<UserData>,
    // Internal buffer the RMT peripheral driver writes received symbols into.
    buffer: Vec<Symbol>,
    // Whether the last receive operation has fully completed
    // (no pending receive and no outstanding partial-receive parts).
    has_finished: bool,
    // Ties the driver's lifetime to the borrowed input pin.
    _p: PhantomData<&'d mut ()>,
}
32
33impl<'d> RxChannelDriver<'d> {
34 const RX_EVENT_CALLBACKS: rmt_rx_event_callbacks_t = rmt_rx_event_callbacks_t {
35 on_recv_done: Some(Self::handle_isr),
36 };
37 const RX_EVENT_CALLBACKS_DISABLE: rmt_rx_event_callbacks_t =
38 rmt_rx_event_callbacks_t { on_recv_done: None };
39 // There is no need for storing more than one event, because the driver will overwrite the buffer
40 // when a new event arrives.
41 const ISR_QUEUE_SIZE: usize = 1;
42
43 /// Creates a new RMT RX channel.
44 ///
45 /// # Errors
46 ///
47 /// - `ESP_ERR_INVALID_ARG`: Create RMT RX channel failed because of invalid argument
48 /// - `ESP_ERR_NO_MEM`: Create RMT RX channel failed because out of memory
49 /// - `ESP_ERR_NOT_FOUND`: Create RMT RX channel failed because all RMT channels are used up and no more free one
50 /// - `ESP_ERR_NOT_SUPPORTED`: Create RMT RX channel failed because some feature is not supported by hardware, e.g. DMA feature is not supported by hardware
51 /// - `ESP_FAIL`: Create RMT RX channel failed because of other error
52 pub fn new(pin: impl InputPin + 'd, config: &RxChannelConfig) -> Result<Self, EspError> {
53 let sys_config = rmt_rx_channel_config_t {
54 clk_src: config.clock_source.into(),
55 resolution_hz: config.resolution.into(),
56 mem_block_symbols: config.memory_access.symbols(),
57 #[cfg(esp_idf_version_at_least_5_1_2)]
58 intr_priority: config.interrupt_priority,
59 flags: rmt_rx_channel_config_t__bindgen_ty_1 {
60 _bitfield_1: rmt_rx_channel_config_t__bindgen_ty_1::new_bitfield_1(
61 config.invert_in as u32,
62 config.memory_access.is_direct() as u32,
63 #[cfg(not(esp_idf_version_at_least_6_0_0))]
64 {
65 config.io_loop_back as u32
66 },
67 #[cfg(esp_idf_version_at_least_5_4_0)]
68 {
69 config.allow_pd as u32
70 },
71 ),
72 ..Default::default()
73 },
74 gpio_num: pin.pin() as _,
75 };
76
77 let mut handle: rmt_channel_handle_t = ptr::null_mut();
78 esp!(unsafe { rmt_new_rx_channel(&sys_config, &mut handle) })?;
79
80 let mut this = Self {
81 is_enabled: false,
82 handle,
83 user_data: Box::new(UserData {
84 queue: Queue::new(Self::ISR_QUEUE_SIZE),
85 callback: None,
86 notif: HalIsrNotification::new(),
87 }),
88 buffer: Vec::new(),
89 has_finished: true,
90 _p: PhantomData,
91 };
92
93 esp!(unsafe {
94 rmt_rx_register_event_callbacks(
95 handle,
96 &Self::RX_EVENT_CALLBACKS,
97 // SAFETY: The referenced data will not be moved and this is the only reference to it
98 &raw mut (*this.user_data) as *mut core::ffi::c_void,
99 )
100 })?;
101
102 Ok(this)
103 }
104
105 /// Returns whether the last receive operation has finished.
106 ///
107 /// If a timeout happend before the `receive` finished, this will return `false`
108 /// and the data can be obtained on the next call to [`Self::receive`].
109 ///
110 /// For partial receives, this will return `false`, until all parts have been received.
111 ///
112 /// If this function returns `true`, the next call to [`Self::receive`]
113 /// will start a new receive operation.
114 #[must_use]
115 pub const fn has_finished(&self) -> bool {
116 self.has_finished
117 }
118
119 /// Receives RMT symbols into the provided buffer, returning the number of received symbols.
120 ///
121 /// This function will wait until the ISR signals that it has received data. After that,
122 /// it copies the data from the internal buffer to the user-provided buffer and returns
123 /// the number of received symbols.
124 ///
125 /// # Timeouts
126 ///
127 /// A timeout can be specified in the [`ReceiveConfig::timeout`] field. If a timeout happens,
128 /// it will return an error with the [`EspError::code`] [`ESP_ERR_TIMEOUT`].
129 /// If the data arrives after the timeout, it can be obtained on the next call to this function.
130 ///
131 /// # Partial Receives
132 ///
133 /// If partial receives are enabled (see [`ReceiveConfig::enable_partial_rx`]), subsequent
134 /// calls to this function will only start a new receive after all parts have been received.
135 ///
136 /// The driver will continue to write data into the internal buffer after this function returns.
137 /// It is important that this function is called without much delay, to not miss any data.
138 ///
139 /// One can check whether a partial receive has finished with [`Self::has_finished`].
140 pub fn receive(
141 &mut self,
142 buffer: &mut [Symbol],
143 config: &ReceiveConfig,
144 ) -> Result<usize, EspError> {
145 let timeout = config.timeout.map(TickType::from);
146 // If the previous receive operation has finished, start a new one:
147 if self.has_finished {
148 // Ensure the internal buffer is large enough:
149 if self.buffer.len() < buffer.len() {
150 self.buffer.resize(buffer.len(), Symbol::default());
151 }
152
153 let sys_config = rmt_receive_config_t {
154 signal_range_min_ns: config.signal_range_min.as_nanos() as _,
155 signal_range_max_ns: config.signal_range_max.as_nanos() as _,
156 #[cfg(esp_idf_version_at_least_5_3_0)]
157 flags: rmt_receive_config_t_extra_rmt_receive_flags {
158 _bitfield_1: rmt_receive_config_t_extra_rmt_receive_flags::new_bitfield_1(
159 config.enable_partial_rx as u32,
160 ),
161 ..Default::default()
162 },
163 };
164
165 // Enable the channel if it is not enabled yet:
166 if !self.is_enabled() {
167 self.enable()?;
168 }
169
170 // Start a new receive operation, this does not block:
171 let slice = self.buffer.as_mut_slice();
172 // SAFETY: The previous receive operation has finished -> the buffer is not in use.
173 // It is allocated -> it will be valid for the duration of the receive operation.
174 // In case the channel is forgotten, the buffer will be leaked, but the driver can
175 // continue writing to it and when the driver is dropped, it will be disabled first.
176 esp!(unsafe {
177 rmt_receive(
178 self.handle,
179 slice.as_mut_ptr() as *mut _,
180 mem::size_of_val::<[Symbol]>(slice),
181 &sys_config,
182 )
183 })?;
184 }
185
186 // The ISR will send an event through the queue when new data has been received.
187 // For partial receives, it will reuse the initial buffer for subsequent parts.
188
189 self.has_finished = false;
190
191 let event_data = self.wait(timeout)?;
192
193 #[cfg(esp_idf_version_at_least_5_3_0)]
194 {
195 self.has_finished = event_data.is_last;
196 }
197 #[cfg(not(esp_idf_version_at_least_5_3_0))]
198 {
199 self.has_finished = true;
200 }
201
202 let read = event_data.num_symbols;
203 // Before returning, copy the data to the user buffer:
204 buffer[..read].copy_from_slice(&self.buffer[..read]);
205
206 Ok(read)
207 }
208
209 /// Receives RMT symbols into the provided buffer, returning the number of received symbols.
210 ///
211 /// For more details, see [`RxChannelDriver::receive`].
212 ///
213 /// # Cancel Safety
214 ///
215 /// If the future is cancelled before completion, the received data can be obtained on the next call
216 /// to this function. Make sure that the buffer provided on the next call is large enough to hold
217 /// the data.
218 pub async fn receive_async(
219 &mut self,
220 buffer: &mut [Symbol],
221 config: &ReceiveConfig,
222 ) -> Result<usize, EspError> {
223 let config_without_timeout = ReceiveConfig {
224 timeout: None,
225 ..config.clone()
226 };
227
228 loop {
229 match self.receive(buffer, &config_without_timeout) {
230 Ok(n) => return Ok(n),
231 Err(err) if err.code() == ESP_ERR_TIMEOUT => {
232 // Wait for the ISR to notify us that new data has arrived:
233 self.user_data.notif.wait().await;
234 }
235 Err(err) => return Err(err),
236 }
237 }
238 }
239
240 fn wait(&mut self, timeout: Option<TickType>) -> Result<RxDoneEventData, EspError> {
241 let has_timeout = timeout.is_some();
242 let ticks = timeout.map_or(u32::MAX, |tick| tick.ticks());
243 loop {
244 if let Some((data, _)) = self.user_data.queue.recv_front(ticks) {
245 return Ok(data);
246 }
247
248 if has_timeout {
249 return Err(EspError::from_infallible::<ESP_ERR_TIMEOUT>());
250 }
251 }
252 }
253
254 /// Subscribe to the ISR handler to get notified when a receive operation is done.
255 ///
256 /// There is only one callback possible, you can not subscribe multiple callbacks.
257 ///
258 /// # Panics
259 ///
260 /// This function will panic if called from an ISR context or while the channel is enabled.
261 ///
262 /// # Safety
263 ///
264 /// Care should be taken not to call std, libc or FreeRTOS APIs (except for a few allowed ones)
265 /// in the callback passed to this function, as it is executed in an ISR context.
266 ///
267 /// You are not allowed to block, but you are allowed to call FreeRTOS APIs with the FromISR suffix.
268 pub unsafe fn subscribe(&mut self, on_recv_done: impl FnMut(RxDoneEventData) + Send + 'static) {
269 assert_not_in_isr();
270
271 if self.is_enabled() {
272 panic!("Cannot subscribe to RX events while the channel is enabled");
273 }
274
275 self.user_data.callback = Some(Box::new(on_recv_done));
276 }
277
278 /// Remove the ISR handler for when a transmission is done.
279 pub fn unsubscribe(&mut self) {
280 assert_not_in_isr();
281 if self.is_enabled() {
282 panic!("Cannot unsubscribe from RX events while the channel is enabled");
283 }
284
285 self.user_data.callback = None;
286 }
287
288 /// Handles the ISR event for when a transmission is done.
289 unsafe extern "C" fn handle_isr(
290 _channel: rmt_channel_handle_t,
291 event_data: *const rmt_rx_done_event_data_t,
292 user_data: *mut core::ffi::c_void,
293 ) -> bool {
294 let data = RxDoneEventData::from(event_data.read());
295 let user_data = &mut *(user_data as *mut UserData);
296
297 if let Some(handler) = user_data.callback.as_mut() {
298 handler(data);
299 }
300
301 let raw_queue = &user_data.queue;
302
303 // timeout is only relevant for non-isr calls
304 let result = match raw_queue.send_back(data, 0) {
305 Ok(result) => result,
306 // it might fail if the queue is full, in this case discard the oldest event:
307 Err(_) => {
308 raw_queue.recv_front(0);
309 raw_queue.send_back(data, 0).unwrap()
310 }
311 };
312
313 user_data.notif.notify_lsb();
314
315 result
316 }
317}
318
319impl<'d> RmtChannel for RxChannelDriver<'d> {
320 fn handle(&self) -> rmt_channel_handle_t {
321 self.handle
322 }
323
324 fn is_enabled(&self) -> bool {
325 self.is_enabled
326 }
327
328 unsafe fn set_internal_enabled(&mut self, is_enabled: bool) {
329 self.is_enabled = is_enabled;
330
331 if !self.is_enabled {
332 // If the channel is disabled, any receive operation has been aborted
333 // -> it is finished:
334 self.has_finished = true;
335 }
336 }
337}
338
339impl<'d> Drop for RxChannelDriver<'d> {
340 fn drop(&mut self) {
341 // Deleting the channel might fail if it is not disabled first.
342 //
343 // The result is ignored here, because there is nothing we can do about it.
344 if self.is_enabled() {
345 let _res = self.disable();
346 }
347
348 // Disable the callback to prevent use-after-free bugs:
349 unsafe {
350 rmt_rx_register_event_callbacks(
351 self.handle(),
352 &Self::RX_EVENT_CALLBACKS_DISABLE,
353 ptr::null_mut(),
354 )
355 };
356
357 unsafe { rmt_del_channel(self.handle) };
358 }
359}
360
361impl<'d> fmt::Debug for RxChannelDriver<'d> {
362 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
363 f.debug_struct("RxChannelDriver")
364 .field("is_enabled", &self.is_enabled)
365 .field("handle", &self.handle)
366 .field("has_finished", &self.has_finished)
367 .finish()
368 }
369}