stm32/uart: refactor rx ringbuffer
- remove some race conditions
- allow full use of rx buffer
.vscode/.gitignore (vendored)
							| @@ -1,3 +1,4 @@ | |||||||
| *.cortex-debug.*.json | *.cortex-debug.*.json | ||||||
| launch.json | launch.json | ||||||
| tasks.json | tasks.json | ||||||
|  | *.cfg | ||||||
|   | |||||||
| @@ -111,24 +111,18 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index | |||||||
|         panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num); |         panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num); | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     let mut wake = false; |  | ||||||
|  |  | ||||||
|     if isr.htif(channel_num) && cr.read().htie() { |     if isr.htif(channel_num) && cr.read().htie() { | ||||||
|         // Acknowledge half transfer complete interrupt |         // Acknowledge half transfer complete interrupt | ||||||
|         dma.ifcr().write(|w| w.set_htif(channel_num, true)); |         dma.ifcr().write(|w| w.set_htif(channel_num, true)); | ||||||
|         wake = true; |     } else if isr.tcif(channel_num) && cr.read().tcie() { | ||||||
|     } |  | ||||||
|  |  | ||||||
|     if isr.tcif(channel_num) && cr.read().tcie() { |  | ||||||
|         // Acknowledge transfer complete interrupt |         // Acknowledge transfer complete interrupt | ||||||
|         dma.ifcr().write(|w| w.set_tcif(channel_num, true)); |         dma.ifcr().write(|w| w.set_tcif(channel_num, true)); | ||||||
|         STATE.complete_count[index].fetch_add(1, Ordering::Release); |         STATE.complete_count[index].fetch_add(1, Ordering::Release); | ||||||
|         wake = true; |     } else { | ||||||
|  |         return; | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     if wake { |     STATE.ch_wakers[index].wake(); | ||||||
|         STATE.ch_wakers[index].wake(); |  | ||||||
|     } |  | ||||||
| } | } | ||||||
|  |  | ||||||
| #[cfg(any(bdma_v2, dmamux))] | #[cfg(any(bdma_v2, dmamux))] | ||||||
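The refactor above replaces the accumulated `wake` flag with an if/else-if chain that services one event per interrupt and returns early when neither flag is set. A minimal, self-contained sketch of that control flow (hypothetical `Event` type and plain booleans standing in for the ISR flags; not the embassy-stm32 API):

    #[derive(Debug, PartialEq)]
    enum Event {
        HalfTransfer,
        TransferComplete,
        Spurious,
    }

    // Hypothetical model of the refactored handler: the two events are now
    // handled as alternatives, and the waker is only touched when one fired.
    fn service(htif: bool, tcif: bool, complete_count: &mut u32) -> Event {
        let event = if htif {
            Event::HalfTransfer
        } else if tcif {
            *complete_count += 1; // mirrors STATE.complete_count.fetch_add(1, ...)
            Event::TransferComplete
        } else {
            return Event::Spurious; // nothing to acknowledge, no wake
        };
        // the real handler calls STATE.ch_wakers[index].wake() here
        event
    }

    fn main() {
        let mut completions = 0;
        assert_eq!(service(true, true, &mut completions), Event::HalfTransfer);
        assert_eq!(service(false, true, &mut completions), Event::TransferComplete);
        assert_eq!(completions, 1);
        assert_eq!(service(false, false, &mut completions), Event::Spurious);
    }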
| @@ -371,7 +365,7 @@ impl<'a, C: Channel> Future for Transfer<'a, C> { | |||||||
| struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>); | struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>); | ||||||
|  |  | ||||||
| impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { | impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { | ||||||
|     fn ndtr(&self) -> usize { |     fn get_remaining_transfers(&self) -> usize { | ||||||
|         let ch = self.0.regs().ch(self.0.num()); |         let ch = self.0.regs().ch(self.0.num()); | ||||||
|         unsafe { ch.ndtr().read() }.ndt() as usize |         unsafe { ch.ndtr().read() }.ndt() as usize | ||||||
|     } |     } | ||||||
| @@ -457,21 +451,17 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||||||
|     } |     } | ||||||
|  |  | ||||||
|     /// Read bytes from the ring buffer |     /// Read bytes from the ring buffer | ||||||
|  |     /// Returns a tuple of the length read and the length remaining in the buffer. | ||||||
|  |     /// If not all of the bytes were read, some bytes will remain in the buffer after the read. | ||||||
|  |     /// The length remaining is the capacity of the ring buffer less the bytes remaining after the read. | ||||||
|     /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. |     /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. | ||||||
|     pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> { |     pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> { | ||||||
|         self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf) |         self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf) | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn is_empty(&self) -> bool { |     /// The capacity of the ringbuffer | ||||||
|         self.ringbuf.is_empty() |     pub fn cap(&self) -> usize { | ||||||
|     } |         self.ringbuf.cap() | ||||||
|  |  | ||||||
|     pub fn len(&self) -> usize { |  | ||||||
|         self.ringbuf.len() |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     pub fn capacity(&self) -> usize { |  | ||||||
|         self.ringbuf.dma_buf.len() |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn set_waker(&mut self, waker: &Waker) { |     pub fn set_waker(&mut self, waker: &Waker) { | ||||||
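`read` now reports two values instead of one. A small standalone illustration of how a caller can consume the new `(read, remaining)` tuple; `MockRing` is a hypothetical stand-in whose second element is simply the unread count, which is close to, but not exactly, what the DMA-backed buffer reports:

    #[derive(Debug)]
    struct OverrunError;

    // Stand-in for the DMA-backed ring buffer; only the read() signature matches.
    struct MockRing {
        data: Vec<u8>,
        pos: usize,
    }

    impl MockRing {
        fn read(&mut self, buf: &mut [u8]) -> Result<(usize, usize), OverrunError> {
            let available = &self.data[self.pos..];
            let len = available.len().min(buf.len());
            buf[..len].copy_from_slice(&available[..len]);
            self.pos += len;
            // (elements copied out, elements still waiting in the buffer)
            Ok((len, self.data.len() - self.pos))
        }
    }

    fn main() -> Result<(), OverrunError> {
        let mut ring = MockRing { data: (0u8..10).collect(), pos: 0 };
        let mut chunk = [0u8; 4];
        loop {
            let (len, remaining) = ring.read(&mut chunk)?;
            println!("read {:?}, {} left", &chunk[..len], remaining);
            if remaining == 0 {
                break;
            }
        }
        Ok(())
    }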
| @@ -506,12 +496,6 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||||||
|         let ch = self.channel.regs().ch(self.channel.num()); |         let ch = self.channel.regs().ch(self.channel.num()); | ||||||
|         unsafe { ch.cr().read() }.en() |         unsafe { ch.cr().read() }.en() | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     /// Synchronize the position of the ring buffer to the actual DMA controller position |  | ||||||
|     pub fn reload_position(&mut self) { |  | ||||||
|         let ch = self.channel.regs().ch(self.channel.num()); |  | ||||||
|         self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize; |  | ||||||
|     } |  | ||||||
| } | } | ||||||
|  |  | ||||||
| impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { | impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { | ||||||
|   | |||||||
| @@ -187,24 +187,18 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index: | |||||||
|         panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num); |         panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num); | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     let mut wake = false; |  | ||||||
|  |  | ||||||
|     if isr.htif(channel_num % 4) && cr.read().htie() { |     if isr.htif(channel_num % 4) && cr.read().htie() { | ||||||
|         // Acknowledge half transfer complete interrupt |         // Acknowledge half transfer complete interrupt | ||||||
|         dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true)); |         dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true)); | ||||||
|         wake = true; |     } else if isr.tcif(channel_num % 4) && cr.read().tcie() { | ||||||
|     } |  | ||||||
|  |  | ||||||
|     if isr.tcif(channel_num % 4) && cr.read().tcie() { |  | ||||||
|         // Acknowledge  transfer complete interrupt |         // Acknowledge  transfer complete interrupt | ||||||
|         dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true)); |         dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true)); | ||||||
|         STATE.complete_count[index].fetch_add(1, Ordering::Release); |         STATE.complete_count[index].fetch_add(1, Ordering::Release); | ||||||
|         wake = true; |     } else { | ||||||
|  |         return; | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     if wake { |     STATE.ch_wakers[index].wake(); | ||||||
|         STATE.ch_wakers[index].wake(); |  | ||||||
|     } |  | ||||||
| } | } | ||||||
|  |  | ||||||
| #[cfg(any(dma_v2, dmamux))] | #[cfg(any(dma_v2, dmamux))] | ||||||
| @@ -612,7 +606,7 @@ impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> { | |||||||
| struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>); | struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>); | ||||||
|  |  | ||||||
| impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { | impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> { | ||||||
|     fn ndtr(&self) -> usize { |     fn get_remaining_transfers(&self) -> usize { | ||||||
|         let ch = self.0.regs().st(self.0.num()); |         let ch = self.0.regs().st(self.0.num()); | ||||||
|         unsafe { ch.ndtr().read() }.ndt() as usize |         unsafe { ch.ndtr().read() }.ndt() as usize | ||||||
|     } |     } | ||||||
| @@ -713,21 +707,17 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||||||
|     } |     } | ||||||
|  |  | ||||||
|     /// Read bytes from the ring buffer |     /// Read bytes from the ring buffer | ||||||
|  |     /// Returns a tuple of the length read and the length remaining in the buffer. | ||||||
|  |     /// If not all of the bytes were read, some bytes will remain in the buffer after the read. | ||||||
|  |     /// The length remaining is the capacity of the ring buffer less the bytes remaining after the read. | ||||||
|     /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. |     /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. | ||||||
|     pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> { |     pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> { | ||||||
|         self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf) |         self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf) | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn is_empty(&self) -> bool { |     /// The capacity of the ringbuffer | ||||||
|         self.ringbuf.is_empty() |     pub fn cap(&self) -> usize { | ||||||
|     } |         self.ringbuf.cap() | ||||||
|  |  | ||||||
|     pub fn len(&self) -> usize { |  | ||||||
|         self.ringbuf.len() |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     pub fn capacity(&self) -> usize { |  | ||||||
|         self.ringbuf.dma_buf.len() |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     pub fn set_waker(&mut self, waker: &Waker) { |     pub fn set_waker(&mut self, waker: &Waker) { | ||||||
| @@ -766,12 +756,6 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { | |||||||
|         let ch = self.channel.regs().st(self.channel.num()); |         let ch = self.channel.regs().st(self.channel.num()); | ||||||
|         unsafe { ch.cr().read() }.en() |         unsafe { ch.cr().read() }.en() | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     /// Synchronize the position of the ring buffer to the actual DMA controller position |  | ||||||
|     pub fn reload_position(&mut self) { |  | ||||||
|         let ch = self.channel.regs().st(self.channel.num()); |  | ||||||
|         self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize; |  | ||||||
|     } |  | ||||||
| } | } | ||||||
|  |  | ||||||
| impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { | impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> { | ||||||
|   | |||||||
| @@ -25,14 +25,13 @@ use super::word::Word; | |||||||
| /// +-----------------------------------------+     +-----------------------------------------+ | /// +-----------------------------------------+     +-----------------------------------------+ | ||||||
| ///  ^          ^               ^                    ^            ^           ^ | ///  ^          ^               ^                    ^            ^           ^ | ||||||
| ///  |          |               |                    |            |           | | ///  |          |               |                    |            |           | | ||||||
| ///  +- first --+               |                    +- end ------+           | | ///  +- start --+               |                    +- end ------+           | | ||||||
| ///  |                          |                    |                        | | ///  |                          |                    |                        | | ||||||
| ///  +- end --------------------+                    +- first ----------------+ | ///  +- end --------------------+                    +- start ----------------+ | ||||||
| /// ``` | /// ``` | ||||||
| pub struct DmaRingBuffer<'a, W: Word> { | pub struct DmaRingBuffer<'a, W: Word> { | ||||||
|     pub(crate) dma_buf: &'a mut [W], |     pub(crate) dma_buf: &'a mut [W], | ||||||
|     first: usize, |     start: usize, | ||||||
|     pub ndtr: usize, |  | ||||||
| } | } | ||||||
|  |  | ||||||
| #[derive(Debug, PartialEq)] | #[derive(Debug, PartialEq)] | ||||||
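To make the renamed `start`/`end` indices concrete, here is a tiny standalone helper (illustrative only, not part of the crate) that returns the readable region(s) implied by the diagram: a single `start..end` range while the writer has not wrapped past the reader, and a `start..cap` tail plus `0..end` head once it has:

    use std::ops::Range;

    // Illustrative only: which index ranges of the DMA buffer hold unread data.
    // `wrapped` corresponds to the writer having passed the end of the buffer
    // since the last read (complete_count > 0 in the real code).
    fn readable(start: usize, end: usize, cap: usize, wrapped: bool) -> (Range<usize>, Option<Range<usize>>) {
        if !wrapped && start <= end {
            (start..end, None)
        } else {
            (start..cap, Some(0..end))
        }
    }

    fn main() {
        assert_eq!(readable(2, 9, 16, false), (2..9, None));
        assert_eq!(readable(12, 5, 16, true), (12..16, Some(0..5)));
    }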
| @@ -41,7 +40,7 @@ pub struct OverrunError; | |||||||
| pub trait DmaCtrl { | pub trait DmaCtrl { | ||||||
|     /// Get the NDTR register value, i.e. the space left in the underlying |     /// Get the NDTR register value, i.e. the space left in the underlying | ||||||
|     /// buffer until the dma writer wraps. |     /// buffer until the dma writer wraps. | ||||||
|     fn ndtr(&self) -> usize; |     fn get_remaining_transfers(&self) -> usize; | ||||||
|  |  | ||||||
|     /// Get the transfer completed counter. |     /// Get the transfer completed counter. | ||||||
|     /// This counter is incremented by the dma controller when NDTR is reloaded, |     /// This counter is incremented by the dma controller when NDTR is reloaded, | ||||||
| @@ -54,151 +53,131 @@ pub trait DmaCtrl { | |||||||
|  |  | ||||||
| impl<'a, W: Word> DmaRingBuffer<'a, W> { | impl<'a, W: Word> DmaRingBuffer<'a, W> { | ||||||
|     pub fn new(dma_buf: &'a mut [W]) -> Self { |     pub fn new(dma_buf: &'a mut [W]) -> Self { | ||||||
|         let ndtr = dma_buf.len(); |         Self { dma_buf, start: 0 } | ||||||
|         Self { |  | ||||||
|             dma_buf, |  | ||||||
|             first: 0, |  | ||||||
|             ndtr, |  | ||||||
|         } |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     /// Reset the ring buffer to its initial state |     /// Reset the ring buffer to its initial state | ||||||
|     pub fn clear(&mut self, mut dma: impl DmaCtrl) { |     pub fn clear(&mut self, mut dma: impl DmaCtrl) { | ||||||
|         self.first = 0; |         self.start = 0; | ||||||
|         self.ndtr = self.dma_buf.len(); |  | ||||||
|         dma.reset_complete_count(); |         dma.reset_complete_count(); | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     /// The buffer end position |     /// The capacity of the ringbuffer | ||||||
|     fn end(&self) -> usize { |     pub const fn cap(&self) -> usize { | ||||||
|         self.dma_buf.len() - self.ndtr |         self.dma_buf.len() | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     /// Returns whether the buffer is empty |     /// The current position of the ringbuffer | ||||||
|     pub fn is_empty(&self) -> bool { |     fn pos(&self, remaining_transfers: usize) -> usize { | ||||||
|         self.first == self.end() |         self.cap() - remaining_transfers | ||||||
|     } |  | ||||||
|  |  | ||||||
|     /// The current number of bytes in the buffer |  | ||||||
|     /// This may change at any time if dma is currently active |  | ||||||
|     pub fn len(&self) -> usize { |  | ||||||
|         // Read out a stable end (the dma periheral can change it at anytime) |  | ||||||
|         let end = self.end(); |  | ||||||
|         if self.first <= end { |  | ||||||
|             // No wrap |  | ||||||
|             end - self.first |  | ||||||
|         } else { |  | ||||||
|             self.dma_buf.len() - self.first + end |  | ||||||
|         } |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
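The new `pos` helper encodes one fact worth spelling out: `get_remaining_transfers()` returns NDTR, the number of transfers left before the DMA wraps, so the writer's current index is the capacity minus that value. A plain-Rust check of the arithmetic (no hardware involved):

    // Writer index derived from the DMA's "remaining transfers" (NDTR) counter.
    fn pos(cap: usize, remaining_transfers: usize) -> usize {
        cap - remaining_transfers
    }

    fn main() {
        assert_eq!(pos(16, 16), 0);  // freshly started transfer: nothing written yet
        assert_eq!(pos(16, 4), 12);  // 12 elements written, 4 until the wrap
    }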
|     /// Read bytes from the ring buffer |     /// Read bytes from the ring buffer | ||||||
|  |     /// Returns a tuple of the length read and the length remaining in the buffer. | ||||||
|  |     /// If not all of the bytes were read, some bytes will remain in the buffer after the read. | ||||||
|  |     /// The length remaining is the capacity of the ring buffer less the bytes remaining after the read. | ||||||
|     /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. |     /// OverrunError is returned if the portion to be read was overwritten by the DMA controller. | ||||||
|     pub fn read(&mut self, mut dma: impl DmaCtrl, buf: &mut [W]) -> Result<usize, OverrunError> { |     pub fn read(&mut self, mut dma: impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> { | ||||||
|         let end = self.end(); |         /* | ||||||
|  |             This algorithm is optimistic: we assume we haven't overrun more than a full buffer and then check | ||||||
|  |             after we've done our work to see we have. This is because on stm32, an interrupt is not guaranteed | ||||||
|  |             after we've done our work to see if we have. This is because on stm32, an interrupt is not guaranteed | ||||||
|  |             not yield relevant information. | ||||||
|  |  | ||||||
|         compiler_fence(Ordering::SeqCst); |             Therefore, the only variable we really need to know is ndtr. If the dma has overrun by more than a full | ||||||
|  |             buffer, we will do a bit more work than we have to, but algorithms should not be optimized for error | ||||||
|  |             conditions. | ||||||
|  |  | ||||||
|         if self.first == end { |             After we've done our work, we confirm that we haven't overrun more than a full buffer, and also that | ||||||
|             // The buffer is currently empty |             the dma has not overrun within the data we could have copied. We check the data we could have copied | ||||||
|  |             rather than the data we actually copied because it costs nothing and confirms an error condition | ||||||
|             if dma.get_complete_count() > 0 { |             earlier. | ||||||
|                 // The DMA has written such that the ring buffer wraps at least once |         */ | ||||||
|                 self.ndtr = dma.ndtr(); |         let end = self.pos(dma.get_remaining_transfers()); | ||||||
|                 if self.end() > self.first || dma.get_complete_count() > 1 { |         if self.start == end && dma.get_complete_count() == 0 { | ||||||
|                     return Err(OverrunError); |             // No bytes are available in the buffer | ||||||
|                 } |             Ok((0, self.cap())) | ||||||
|             } |         } else if self.start < end { | ||||||
|  |  | ||||||
|             Ok(0) |  | ||||||
|         } else if self.first < end { |  | ||||||
|             // The available, unread portion in the ring buffer DOES NOT wrap |             // The available, unread portion in the ring buffer DOES NOT wrap | ||||||
|  |  | ||||||
|             if dma.get_complete_count() > 1 { |  | ||||||
|                 return Err(OverrunError); |  | ||||||
|             } |  | ||||||
|  |  | ||||||
|             // Copy out the bytes from the dma buffer |             // Copy out the bytes from the dma buffer | ||||||
|             let len = self.copy_to(buf, self.first..end); |             let len = self.copy_to(buf, self.start..end); | ||||||
|  |  | ||||||
|             compiler_fence(Ordering::SeqCst); |             compiler_fence(Ordering::SeqCst); | ||||||
|  |  | ||||||
|             match dma.get_complete_count() { |             /* | ||||||
|                 0 => { |                 first, check if the dma has wrapped at all if it's after end | ||||||
|                     // The DMA writer has not wrapped before nor after the copy |                 or more than once if it's before start | ||||||
|                 } |  | ||||||
|                 1 => { |  | ||||||
|                     // The DMA writer has written such that the ring buffer now wraps |  | ||||||
|                     self.ndtr = dma.ndtr(); |  | ||||||
|                     if self.end() > self.first || dma.get_complete_count() > 1 { |  | ||||||
|                         // The bytes that we have copied out have overflowed |  | ||||||
|                         // as the writer has now both wrapped and is currently writing |  | ||||||
|                         // within the region that we have just copied out |  | ||||||
|                         return Err(OverrunError); |  | ||||||
|                     } |  | ||||||
|                 } |  | ||||||
|                 _ => { |  | ||||||
|                     return Err(OverrunError); |  | ||||||
|                 } |  | ||||||
|             } |  | ||||||
|  |  | ||||||
|             self.first = (self.first + len) % self.dma_buf.len(); |                 this is in a critical section to try to reduce mushy behavior. | ||||||
|             Ok(len) |                 it's not ideal but it's the best we can do | ||||||
|  |  | ||||||
|  |                 then, get the current position of the dma write and check | ||||||
|  |                 if it's inside data we could have copied | ||||||
|  |             */ | ||||||
|  |             let (pos, complete_count) = | ||||||
|  |                 critical_section::with(|_| (self.pos(dma.get_remaining_transfers()), dma.get_complete_count())); | ||||||
|  |             if (pos >= self.start && pos < end) || (complete_count > 0 && pos >= end) || complete_count > 1 { | ||||||
|  |                 Err(OverrunError) | ||||||
|  |             } else { | ||||||
|  |                 self.start = (self.start + len) % self.cap(); | ||||||
|  |  | ||||||
|  |                 Ok((len, self.cap() - self.start)) | ||||||
|  |             } | ||||||
|  |         } else if self.start + buf.len() < self.cap() { | ||||||
|  |             // The available, unread portion in the ring buffer DOES wrap | ||||||
|  |             // The DMA writer has wrapped since we last read and is currently | ||||||
|  |             // writing (or the next byte added will be) in the beginning of the ring buffer. | ||||||
|  |  | ||||||
|  |             // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer. | ||||||
|  |  | ||||||
|  |             // Copy out from the dma buffer | ||||||
|  |             let len = self.copy_to(buf, self.start..self.cap()); | ||||||
|  |  | ||||||
|  |             compiler_fence(Ordering::SeqCst); | ||||||
|  |  | ||||||
|  |             /* | ||||||
|  |                 first, check if the dma has wrapped around more than once | ||||||
|  |  | ||||||
|  |                 then, get the current position of the dma write and check | ||||||
|  |                 if it's inside data we could have copied | ||||||
|  |             */ | ||||||
|  |             let pos = self.pos(dma.get_remaining_transfers()); | ||||||
|  |             if pos > self.start || pos < end || dma.get_complete_count() > 1 { | ||||||
|  |                 Err(OverrunError) | ||||||
|  |             } else { | ||||||
|  |                 self.start = (self.start + len) % self.cap(); | ||||||
|  |  | ||||||
|  |                 Ok((len, self.start + end)) | ||||||
|  |             } | ||||||
|         } else { |         } else { | ||||||
|             // The available, unread portion in the ring buffer DOES wrap |             // The available, unread portion in the ring buffer DOES wrap | ||||||
|             // The DMA writer has wrapped since we last read and is currently |             // The DMA writer has wrapped since we last read and is currently | ||||||
|             // writing (or the next byte added will be) in the beginning of the ring buffer. |             // writing (or the next byte added will be) in the beginning of the ring buffer. | ||||||
|  |  | ||||||
|             let complete_count = dma.get_complete_count(); |             // The provided read buffer is large enough to include all bytes from the tail of the dma buffer, | ||||||
|             if complete_count > 1 { |             // so the next read will not have any unread tail bytes in the ring buffer. | ||||||
|                 return Err(OverrunError); |  | ||||||
|             } |  | ||||||
|  |  | ||||||
|             // If the unread portion wraps then the writer must also have wrapped |             // Copy out from the dma buffer | ||||||
|             assert!(complete_count == 1); |             let tail = self.copy_to(buf, self.start..self.cap()); | ||||||
|  |             let head = self.copy_to(&mut buf[tail..], 0..end); | ||||||
|  |  | ||||||
|             if self.first + buf.len() < self.dma_buf.len() { |             compiler_fence(Ordering::SeqCst); | ||||||
|                 // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer. |  | ||||||
|  |  | ||||||
|                 // Copy out from the dma buffer |             /* | ||||||
|                 let len = self.copy_to(buf, self.first..self.dma_buf.len()); |                 first, check if the dma has wrapped around more than once | ||||||
|  |  | ||||||
|                 compiler_fence(Ordering::SeqCst); |                 then, get the current position of the dma write and check | ||||||
|  |                 if it's inside data we could have copied | ||||||
|                 // We have now copied out the data from dma_buf |             */ | ||||||
|                 // Make sure that the just read part was not overwritten during the copy |             let pos = self.pos(dma.get_remaining_transfers()); | ||||||
|                 self.ndtr = dma.ndtr(); |             if pos > self.start || pos < end || dma.reset_complete_count() > 1 { | ||||||
|                 if self.end() > self.first || dma.get_complete_count() > 1 { |                 Err(OverrunError) | ||||||
|                     // The writer has entered the data that we have just read since we read out `end` in the beginning and until now. |  | ||||||
|                     return Err(OverrunError); |  | ||||||
|                 } |  | ||||||
|  |  | ||||||
|                 self.first = (self.first + len) % self.dma_buf.len(); |  | ||||||
|                 Ok(len) |  | ||||||
|             } else { |             } else { | ||||||
|                 // The provided read buffer is large enough to include all bytes from the tail of the dma buffer, |                 self.start = head; | ||||||
|                 // so the next read will not have any unread tail bytes in the ring buffer. |                 Ok((tail + head, self.cap() - self.start)) | ||||||
|  |  | ||||||
|                 // Copy out from the dma buffer |  | ||||||
|                 let tail = self.copy_to(buf, self.first..self.dma_buf.len()); |  | ||||||
|                 let head = self.copy_to(&mut buf[tail..], 0..end); |  | ||||||
|  |  | ||||||
|                 compiler_fence(Ordering::SeqCst); |  | ||||||
|  |  | ||||||
|                 // We have now copied out the data from dma_buf |  | ||||||
|                 // Reset complete counter and make sure that the just read part was not overwritten during the copy |  | ||||||
|                 self.ndtr = dma.ndtr(); |  | ||||||
|                 let complete_count = dma.reset_complete_count(); |  | ||||||
|                 if self.end() > self.first || complete_count > 1 { |  | ||||||
|                     return Err(OverrunError); |  | ||||||
|                 } |  | ||||||
|  |  | ||||||
|                 self.first = head; |  | ||||||
|                 Ok(tail + head) |  | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
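In the branch where the wrapped region fits entirely in the caller's buffer, the copy happens as two slices, tail first and head second. A worked, standalone example with illustrative indices:

    // Worked example of the wrapping read: tail = start..cap, head = 0..end.
    fn main() {
        let dma_buf: Vec<u8> = (0..16).collect();
        let (start, end) = (14usize, 6usize); // reader at 14, writer at 6, writer has wrapped once

        let mut out = [0u8; 8];
        let tail = &dma_buf[start..dma_buf.len()]; // elements 14, 15
        let head = &dma_buf[0..end];               // elements 0..=5
        out[..tail.len()].copy_from_slice(tail);
        out[tail.len()..tail.len() + head.len()].copy_from_slice(head);

        assert_eq!(out, [14, 15, 0, 1, 2, 3, 4, 5]);
        // the new read position becomes head.len(), i.e. 6
    }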
|  |  | ||||||
|     /// Copy from the dma buffer at `data_range` into `buf` |     /// Copy from the dma buffer at `data_range` into `buf` | ||||||
|     fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize { |     fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize { | ||||||
|         // Limit the number of bytes that can be copied |         // Limit the number of bytes that can be copied | ||||||
| @@ -218,203 +197,289 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { | |||||||
|         length |         length | ||||||
|     } |     } | ||||||
| } | } | ||||||
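The overrun test in the non-wrapping branch can be read in isolation: after copying `start..end`, the read is only valid if the writer's sampled position is outside the region that could have been copied and the buffer has not wrapped past it. A standalone model that mirrors that condition (the real code samples `pos` and `complete_count` inside a critical section):

    // Mirrors the non-wrapping branch of DmaRingBuffer::read: `start..end` was
    // copied out, `pos` and `complete_count` were sampled afterwards.
    fn overrun_after_linear_read(start: usize, end: usize, pos: usize, complete_count: usize) -> bool {
        (pos >= start && pos < end)               // writer re-entered the copied region
            || (complete_count > 0 && pos >= end) // writer wrapped and passed `end`
            || complete_count > 1                 // writer lapped the reader outright
    }

    fn main() {
        // Writer is ahead of the copied region and has not wrapped: fine.
        assert!(!overrun_after_linear_read(2, 4, 6, 0));
        // Writer wrapped and is already past `end`: the copy may contain torn data.
        assert!(overrun_after_linear_read(2, 4, 6, 1));
        // Writer came back around into the copied region itself.
        assert!(overrun_after_linear_read(2, 8, 5, 1));
    }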
|  |  | ||||||
| #[cfg(test)] | #[cfg(test)] | ||||||
| mod tests { | mod tests { | ||||||
|     use core::array; |     use core::array; | ||||||
|     use core::cell::RefCell; |     use std::{cell, vec}; | ||||||
|  |  | ||||||
|     use super::*; |     use super::*; | ||||||
|  |  | ||||||
|     struct TestCtrl { |     #[allow(dead_code)] | ||||||
|         next_ndtr: RefCell<Option<usize>>, |     #[derive(PartialEq, Debug)] | ||||||
|         complete_count: usize, |     enum TestCircularTransferRequest { | ||||||
|  |         GetCompleteCount(usize), | ||||||
|  |         ResetCompleteCount(usize), | ||||||
|  |         PositionRequest(usize), | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     impl TestCtrl { |     struct TestCircularTransfer { | ||||||
|         pub const fn new() -> Self { |         len: usize, | ||||||
|             Self { |         requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>, | ||||||
|                 next_ndtr: RefCell::new(None), |     } | ||||||
|                 complete_count: 0, |  | ||||||
|  |     impl DmaCtrl for &mut TestCircularTransfer { | ||||||
|  |         fn get_remaining_transfers(&self) -> usize { | ||||||
|  |             match self.requests.borrow_mut().pop().unwrap() { | ||||||
|  |                 TestCircularTransferRequest::PositionRequest(pos) => { | ||||||
|  |                     let len = self.len; | ||||||
|  |  | ||||||
|  |                     assert!(len >= pos); | ||||||
|  |  | ||||||
|  |                     len - pos | ||||||
|  |                 } | ||||||
|  |                 _ => unreachable!(), | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|  |  | ||||||
|         pub fn set_next_ndtr(&mut self, ndtr: usize) { |  | ||||||
|             self.next_ndtr.borrow_mut().replace(ndtr); |  | ||||||
|         } |  | ||||||
|     } |  | ||||||
|  |  | ||||||
|     impl DmaCtrl for &mut TestCtrl { |  | ||||||
|         fn ndtr(&self) -> usize { |  | ||||||
|             self.next_ndtr.borrow_mut().unwrap() |  | ||||||
|         } |  | ||||||
|  |  | ||||||
|         fn get_complete_count(&self) -> usize { |         fn get_complete_count(&self) -> usize { | ||||||
|             self.complete_count |             match self.requests.borrow_mut().pop().unwrap() { | ||||||
|  |                 TestCircularTransferRequest::GetCompleteCount(complete_count) => complete_count, | ||||||
|  |                 _ => unreachable!(), | ||||||
|  |             } | ||||||
|         } |         } | ||||||
|  |  | ||||||
|         fn reset_complete_count(&mut self) -> usize { |         fn reset_complete_count(&mut self) -> usize { | ||||||
|             let old = self.complete_count; |             match self.requests.get_mut().pop().unwrap() { | ||||||
|             self.complete_count = 0; |                 TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count, | ||||||
|             old |                 _ => unreachable!(), | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |     } | ||||||
|  |  | ||||||
|  |     impl TestCircularTransfer { | ||||||
|  |         pub fn new(len: usize) -> Self { | ||||||
|  |             Self { | ||||||
|  |                 requests: cell::RefCell::new(vec![]), | ||||||
|  |                 len: len, | ||||||
|  |             } | ||||||
|  |         } | ||||||
|  |  | ||||||
|  |         pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) { | ||||||
|  |             requests.reverse(); | ||||||
|  |             self.requests.replace(requests); | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[test] |     #[test] | ||||||
|     fn empty() { |     fn empty_and_read_not_started() { | ||||||
|         let mut dma_buf = [0u8; 16]; |         let mut dma_buf = [0u8; 16]; | ||||||
|         let ringbuf = DmaRingBuffer::new(&mut dma_buf); |         let ringbuf = DmaRingBuffer::new(&mut dma_buf); | ||||||
|  |  | ||||||
|         assert!(ringbuf.is_empty()); |         assert_eq!(0, ringbuf.start); | ||||||
|         assert_eq!(0, ringbuf.len()); |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[test] |     #[test] | ||||||
|     fn can_read() { |     fn can_read() { | ||||||
|  |         let mut dma = TestCircularTransfer::new(16); | ||||||
|  |  | ||||||
|         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||||||
|         let mut ctrl = TestCtrl::new(); |  | ||||||
|         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); |         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | ||||||
|         ringbuf.ndtr = 6; |  | ||||||
|  |  | ||||||
|         assert!(!ringbuf.is_empty()); |         assert_eq!(0, ringbuf.start); | ||||||
|         assert_eq!(10, ringbuf.len()); |         assert_eq!(16, ringbuf.cap()); | ||||||
|  |  | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(8), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(10), | ||||||
|  |             TestCircularTransferRequest::GetCompleteCount(0), | ||||||
|  |         ]); | ||||||
|         let mut buf = [0; 2]; |         let mut buf = [0; 2]; | ||||||
|         assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); |         assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|         assert_eq!([0, 1], buf); |         assert_eq!([0, 1], buf); | ||||||
|         assert_eq!(8, ringbuf.len()); |         assert_eq!(2, ringbuf.start); | ||||||
|  |  | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(10), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(12), | ||||||
|  |             TestCircularTransferRequest::GetCompleteCount(0), | ||||||
|  |         ]); | ||||||
|         let mut buf = [0; 2]; |         let mut buf = [0; 2]; | ||||||
|         assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); |         assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|         assert_eq!([2, 3], buf); |         assert_eq!([2, 3], buf); | ||||||
|         assert_eq!(6, ringbuf.len()); |         assert_eq!(4, ringbuf.start); | ||||||
|  |  | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(12), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(14), | ||||||
|  |             TestCircularTransferRequest::GetCompleteCount(0), | ||||||
|  |         ]); | ||||||
|         let mut buf = [0; 8]; |         let mut buf = [0; 8]; | ||||||
|         assert_eq!(6, ringbuf.read(&mut ctrl, &mut buf).unwrap()); |         assert_eq!(8, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|         assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]); |         assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]); | ||||||
|         assert_eq!(0, ringbuf.len()); |         assert_eq!(12, ringbuf.start); | ||||||
|  |  | ||||||
|         let mut buf = [0; 2]; |  | ||||||
|         assert_eq!(0, ringbuf.read(&mut ctrl, &mut buf).unwrap()); |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[test] |     #[test] | ||||||
|     fn can_read_with_wrap() { |     fn can_read_with_wrap() { | ||||||
|  |         let mut dma = TestCircularTransfer::new(16); | ||||||
|  |  | ||||||
|         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||||||
|         let mut ctrl = TestCtrl::new(); |  | ||||||
|         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); |         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | ||||||
|         ringbuf.first = 12; |  | ||||||
|         ringbuf.ndtr = 10; |  | ||||||
|  |  | ||||||
|         // The dma controller has written 4 + 6 bytes and has reloaded NDTR |         assert_eq!(0, ringbuf.start); | ||||||
|         ctrl.complete_count = 1; |         assert_eq!(16, ringbuf.cap()); | ||||||
|         ctrl.set_next_ndtr(10); |  | ||||||
|  |  | ||||||
|         assert!(!ringbuf.is_empty()); |         /* | ||||||
|         assert_eq!(6 + 4, ringbuf.len()); |             Read to just before the end of the buffer | ||||||
|  |         */ | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(14), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(16), | ||||||
|  |             TestCircularTransferRequest::GetCompleteCount(0), | ||||||
|  |         ]); | ||||||
|  |         let mut buf = [0; 14]; | ||||||
|  |         assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|  |         assert_eq!(14, ringbuf.start); | ||||||
|  |  | ||||||
|         let mut buf = [0; 2]; |         /* | ||||||
|         assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); |             Now, read around the buffer | ||||||
|         assert_eq!([12, 13], buf); |         */ | ||||||
|         assert_eq!(6 + 2, ringbuf.len()); |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(6), | ||||||
|         let mut buf = [0; 4]; |             TestCircularTransferRequest::PositionRequest(8), | ||||||
|         assert_eq!(4, ringbuf.read(&mut ctrl, &mut buf).unwrap()); |             TestCircularTransferRequest::ResetCompleteCount(1), | ||||||
|         assert_eq!([14, 15, 0, 1], buf); |         ]); | ||||||
|         assert_eq!(4, ringbuf.len()); |         let mut buf = [0; 6]; | ||||||
|  |         assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|  |         assert_eq!(4, ringbuf.start); | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[test] |     #[test] | ||||||
|     fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() { |     fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() { | ||||||
|  |         let mut dma = TestCircularTransfer::new(16); | ||||||
|  |  | ||||||
|         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||||||
|         let mut ctrl = TestCtrl::new(); |  | ||||||
|         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); |         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | ||||||
|         ringbuf.first = 2; |  | ||||||
|         ringbuf.ndtr = 6; |  | ||||||
|  |  | ||||||
|         // The dma controller has written 6 + 2 bytes and has reloaded NDTR |         assert_eq!(0, ringbuf.start); | ||||||
|         ctrl.complete_count = 1; |         assert_eq!(16, ringbuf.cap()); | ||||||
|         ctrl.set_next_ndtr(14); |  | ||||||
|  |  | ||||||
|  |         /* | ||||||
|  |             Read to just before the end of the buffer | ||||||
|  |         */ | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(14), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(16), | ||||||
|  |             TestCircularTransferRequest::GetCompleteCount(0), | ||||||
|  |         ]); | ||||||
|  |         let mut buf = [0; 14]; | ||||||
|  |         assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|  |         assert_eq!(14, ringbuf.start); | ||||||
|  |  | ||||||
|  |         /* | ||||||
|  |             Now, read to the end of the buffer | ||||||
|  |         */ | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(6), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(8), | ||||||
|  |             TestCircularTransferRequest::ResetCompleteCount(1), | ||||||
|  |         ]); | ||||||
|         let mut buf = [0; 2]; |         let mut buf = [0; 2]; | ||||||
|         assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); |         assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|         assert_eq!([2, 3], buf); |         assert_eq!(0, ringbuf.start); | ||||||
|  |  | ||||||
|         assert_eq!(1, ctrl.complete_count); // The interrupt flag IS NOT cleared |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[test] |     #[test] | ||||||
|     fn can_read_when_dma_writer_is_wrapped_and_read_wraps() { |     fn can_read_when_dma_writer_wraps_once_with_same_ndtr() { | ||||||
|  |         let mut dma = TestCircularTransfer::new(16); | ||||||
|  |  | ||||||
|         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||||||
|         let mut ctrl = TestCtrl::new(); |  | ||||||
|         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); |         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | ||||||
|         ringbuf.first = 12; |  | ||||||
|         ringbuf.ndtr = 10; |  | ||||||
|  |  | ||||||
|         // The dma controller has written 6 + 2 bytes and has reloaded NDTR |         assert_eq!(0, ringbuf.start); | ||||||
|         ctrl.complete_count = 1; |         assert_eq!(16, ringbuf.cap()); | ||||||
|         ctrl.set_next_ndtr(14); |  | ||||||
|  |  | ||||||
|         let mut buf = [0; 10]; |         /* | ||||||
|         assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap()); |             Read to about the middle of the buffer | ||||||
|         assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf); |         */ | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(6), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(6), | ||||||
|  |             TestCircularTransferRequest::GetCompleteCount(0), | ||||||
|  |         ]); | ||||||
|  |         let mut buf = [0; 6]; | ||||||
|  |         assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|  |         assert_eq!(6, ringbuf.start); | ||||||
|  |  | ||||||
|         assert_eq!(0, ctrl.complete_count); // The interrupt flag IS cleared |         /* | ||||||
|     } |             Now, wrap the DMA controller around | ||||||
|  |         */ | ||||||
|     #[test] |         dma.setup(vec![ | ||||||
|     fn cannot_read_when_dma_writer_wraps_with_same_ndtr() { |             TestCircularTransferRequest::PositionRequest(6), | ||||||
|         let mut dma_buf = [0u8; 16]; |             TestCircularTransferRequest::GetCompleteCount(1), | ||||||
|         let mut ctrl = TestCtrl::new(); |             TestCircularTransferRequest::PositionRequest(6), | ||||||
|         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); |             TestCircularTransferRequest::GetCompleteCount(1), | ||||||
|         ringbuf.first = 6; |         ]); | ||||||
|         ringbuf.ndtr = 10; |         let mut buf = [0; 6]; | ||||||
|         ctrl.set_next_ndtr(9); |         assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|  |         assert_eq!(12, ringbuf.start); | ||||||
|         assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty |  | ||||||
|  |  | ||||||
|         // The dma controller has written exactly 16 bytes |  | ||||||
|         ctrl.complete_count = 1; |  | ||||||
|  |  | ||||||
|         let mut buf = [0; 2]; |  | ||||||
|         assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); |  | ||||||
|  |  | ||||||
|         assert_eq!(1, ctrl.complete_count); // The complete counter is not reset |  | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[test] |     #[test] | ||||||
|     fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() { |     fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() { | ||||||
|  |         let mut dma = TestCircularTransfer::new(16); | ||||||
|  |  | ||||||
|         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||||||
|         let mut ctrl = TestCtrl::new(); |  | ||||||
|         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); |         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | ||||||
|         ringbuf.first = 2; |  | ||||||
|         ringbuf.ndtr = 6; |  | ||||||
|  |  | ||||||
|         // The dma controller has written 6 + 3 bytes and has reloaded NDTR |         assert_eq!(0, ringbuf.start); | ||||||
|         ctrl.complete_count = 1; |         assert_eq!(16, ringbuf.cap()); | ||||||
|         ctrl.set_next_ndtr(13); |  | ||||||
|  |  | ||||||
|         let mut buf = [0; 2]; |         /* | ||||||
|         assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); |             Read a few bytes | ||||||
|  |         */ | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(2), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(2), | ||||||
|  |             TestCircularTransferRequest::GetCompleteCount(0), | ||||||
|  |         ]); | ||||||
|  |         let mut buf = [0; 6]; | ||||||
|  |         assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|  |         assert_eq!(2, ringbuf.start); | ||||||
|  |  | ||||||
|         assert_eq!(1, ctrl.complete_count); // The complete counter is not reset |         /* | ||||||
|  |             Now, overtake the reader | ||||||
|  |         */ | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(4), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(6), | ||||||
|  |             TestCircularTransferRequest::GetCompleteCount(1), | ||||||
|  |         ]); | ||||||
|  |         let mut buf = [0; 6]; | ||||||
|  |         assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err()); | ||||||
|     } |     } | ||||||
|  |  | ||||||
|     #[test] |     #[test] | ||||||
|     fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() { |     fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() { | ||||||
|  |         let mut dma = TestCircularTransfer::new(16); | ||||||
|  |  | ||||||
|         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 |         let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15 | ||||||
|         let mut ctrl = TestCtrl::new(); |  | ||||||
|         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); |         let mut ringbuf = DmaRingBuffer::new(&mut dma_buf); | ||||||
|         ringbuf.first = 12; |  | ||||||
|         ringbuf.ndtr = 10; |  | ||||||
|  |  | ||||||
|         // The dma controller has written 6 + 13 bytes and has reloaded NDTR |         assert_eq!(0, ringbuf.start); | ||||||
|         ctrl.complete_count = 1; |         assert_eq!(16, ringbuf.cap()); | ||||||
|         ctrl.set_next_ndtr(3); |  | ||||||
|  |  | ||||||
|         let mut buf = [0; 2]; |         /* | ||||||
|         assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); |             Read to just before the end of the buffer | ||||||
|  |         */ | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(14), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(16), | ||||||
|  |             TestCircularTransferRequest::GetCompleteCount(0), | ||||||
|  |         ]); | ||||||
|  |         let mut buf = [0; 14]; | ||||||
|  |         assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0); | ||||||
|  |         assert_eq!(14, ringbuf.start); | ||||||
|  |  | ||||||
|         assert_eq!(1, ctrl.complete_count); // The complete counter is not reset |         /* | ||||||
|  |             Now, overtake the reader | ||||||
|  |         */ | ||||||
|  |         dma.setup(vec![ | ||||||
|  |             TestCircularTransferRequest::PositionRequest(8), | ||||||
|  |             TestCircularTransferRequest::PositionRequest(10), | ||||||
|  |             TestCircularTransferRequest::ResetCompleteCount(2), | ||||||
|  |         ]); | ||||||
|  |         let mut buf = [0; 6]; | ||||||
|  |         assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err()); | ||||||
|     } |     } | ||||||
| } | } | ||||||
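These tests replace the old stateful `TestCtrl` with a scripted double: every expected `DmaCtrl` call is queued up front and popped in order, so the test fails loudly on an unexpected or out-of-order register access. The same pattern in miniature (generic sketch, unrelated to the real trait):

    use std::cell::RefCell;

    // Generic sketch of the "scripted responses" test double used above.
    #[derive(Debug, PartialEq)]
    enum Request {
        Position(usize),
        CompleteCount(usize),
    }

    struct Scripted {
        queue: RefCell<Vec<Request>>,
    }

    impl Scripted {
        fn new(mut script: Vec<Request>) -> Self {
            script.reverse(); // pop() then yields the requests in submission order
            Scripted { queue: RefCell::new(script) }
        }
        fn position(&self) -> usize {
            match self.queue.borrow_mut().pop().expect("unexpected call") {
                Request::Position(p) => p,
                other => panic!("expected Position, got {:?}", other),
            }
        }
        fn complete_count(&self) -> usize {
            match self.queue.borrow_mut().pop().expect("unexpected call") {
                Request::CompleteCount(c) => c,
                other => panic!("expected CompleteCount, got {:?}", other),
            }
        }
    }

    fn main() {
        let dma = Scripted::new(vec![Request::Position(14), Request::CompleteCount(0)]);
        assert_eq!(dma.position(), 14);
        assert_eq!(dma.complete_count(), 0);
    }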
|   | |||||||
| @@ -1,4 +1,4 @@ | |||||||
| #![no_std] | #![cfg_attr(not(test), no_std)] | ||||||
| #![cfg_attr(feature = "nightly", feature(async_fn_in_trait, impl_trait_projections))] | #![cfg_attr(feature = "nightly", feature(async_fn_in_trait, impl_trait_projections))] | ||||||
|  |  | ||||||
| // This must go FIRST so that all the other modules see its macros. | // This must go FIRST so that all the other modules see its macros. | ||||||
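The crate-root change swaps the unconditional `#![no_std]` for one that only applies outside `cargo test`, which is what allows the new ring-buffer tests to use `std` types such as `Vec`. The pattern in a stripped-down, hypothetical lib.rs:

    // lib.rs of a hypothetical crate using the same pattern: no_std on targets,
    // full std available when the host runs `cargo test`.
    #![cfg_attr(not(test), no_std)]

    pub fn double(x: u32) -> u32 {
        x * 2
    }

    #[cfg(test)]
    mod tests {
        use super::*;

        #[test]
        fn doubles() {
            // std-only conveniences (Vec, println!, etc.) are fine here.
            let doubled: Vec<u32> = (0..4).map(double).collect();
            assert_eq!(doubled, vec![0, 2, 4, 6]);
        }
    }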
|   | |||||||
| @@ -13,6 +13,12 @@ use futures::future::{select, Either}; | |||||||
| use crate::dma::{NoDma, Transfer}; | use crate::dma::{NoDma, Transfer}; | ||||||
| use crate::gpio::sealed::AFType; | use crate::gpio::sealed::AFType; | ||||||
| #[cfg(not(any(usart_v1, usart_v2)))] | #[cfg(not(any(usart_v1, usart_v2)))] | ||||||
|  | #[allow(unused_imports)] | ||||||
|  | use crate::pac::usart::regs::Isr as Sr; | ||||||
|  | #[cfg(any(usart_v1, usart_v2))] | ||||||
|  | #[allow(unused_imports)] | ||||||
|  | use crate::pac::usart::regs::Sr; | ||||||
|  | #[cfg(not(any(usart_v1, usart_v2)))] | ||||||
| use crate::pac::usart::Lpuart as Regs; | use crate::pac::usart::Lpuart as Regs; | ||||||
| #[cfg(any(usart_v1, usart_v2))] | #[cfg(any(usart_v1, usart_v2))] | ||||||
| use crate::pac::usart::Usart as Regs; | use crate::pac::usart::Usart as Regs; | ||||||
| @@ -32,7 +38,6 @@ impl<T: BasicInstance> interrupt::Handler<T::Interrupt> for InterruptHandler<T> | |||||||
|  |  | ||||||
|         let (sr, cr1, cr3) = unsafe { (sr(r).read(), r.cr1().read(), r.cr3().read()) }; |         let (sr, cr1, cr3) = unsafe { (sr(r).read(), r.cr1().read(), r.cr3().read()) }; | ||||||
|  |  | ||||||
|         let mut wake = false; |  | ||||||
|         let has_errors = (sr.pe() && cr1.peie()) || ((sr.fe() || sr.ne() || sr.ore()) && cr3.eie()); |         let has_errors = (sr.pe() && cr1.peie()) || ((sr.fe() || sr.ne() || sr.ore()) && cr3.eie()); | ||||||
|         if has_errors { |         if has_errors { | ||||||
|             // clear all interrupts and DMA Rx Request |             // clear all interrupts and DMA Rx Request | ||||||
| @@ -52,35 +57,24 @@ impl<T: BasicInstance> interrupt::Handler<T::Interrupt> for InterruptHandler<T> | |||||||
|                     w.set_dmar(false); |                     w.set_dmar(false); | ||||||
|                 }); |                 }); | ||||||
|             } |             } | ||||||
|  |         } else if cr1.idleie() && sr.idle() { | ||||||
|  |             // IDLE detected: no more data will come | ||||||
|  |             unsafe { | ||||||
|  |                 r.cr1().modify(|w| { | ||||||
|  |                     // disable idle line detection | ||||||
|  |                     w.set_idleie(false); | ||||||
|  |                 }); | ||||||
|  |             } | ||||||
|  |         } else if cr1.rxneie() { | ||||||
|  |             // We cannot check the RXNE flag as it is auto-cleared by the DMA controller | ||||||
|  |  | ||||||
|             wake = true; |             // It is up to the listener to determine if this was in fact an RX event and disable the RXNE detection | ||||||
|         } else { |         } else { | ||||||
|             if cr1.idleie() && sr.idle() { |             return; | ||||||
|                 // IDLE detected: no more data will come |  | ||||||
|                 unsafe { |  | ||||||
|                     r.cr1().modify(|w| { |  | ||||||
|                         // disable idle line detection |  | ||||||
|                         w.set_idleie(false); |  | ||||||
|                     }); |  | ||||||
|                 } |  | ||||||
|  |  | ||||||
|                 wake = true; |  | ||||||
|             } |  | ||||||
|  |  | ||||||
|             if cr1.rxneie() { |  | ||||||
|                 // We cannot check the RXNE flag as it is auto-cleared by the DMA controller |  | ||||||
|  |  | ||||||
|                 // It is up to the listener to determine if this in fact was a RX event and disable the RXNE detection |  | ||||||
|  |  | ||||||
|                 wake = true; |  | ||||||
|             } |  | ||||||
|         } |         } | ||||||
|  |  | ||||||
|         if wake { |         compiler_fence(Ordering::SeqCst); | ||||||
|             compiler_fence(Ordering::SeqCst); |         s.rx_waker.wake(); | ||||||
|  |  | ||||||
|             s.rx_waker.wake(); |  | ||||||
|         } |  | ||||||
|     } |     } | ||||||
| } | } | ||||||
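As in the DMA handlers, the USART interrupt now services exactly one case per invocation and skips the wake-up entirely when nothing relevant fired. A compact model of that decision (hypothetical `RxIrq` enum; the real handler also clears flags and disables the DMA request or idle detection as shown above):

    #[derive(Debug, PartialEq)]
    enum RxIrq {
        Error,
        Idle,
        RxNotEmpty,
        Ignored,
    }

    // Mirrors the priority of the new if / else-if chain in the handler above.
    fn classify(has_errors: bool, idleie: bool, idle: bool, rxneie: bool) -> RxIrq {
        let event = if has_errors {
            RxIrq::Error
        } else if idleie && idle {
            RxIrq::Idle
        } else if rxneie {
            RxIrq::RxNotEmpty
        } else {
            return RxIrq::Ignored; // no fence, no wake
        };
        // the real handler issues a compiler fence and wakes rx_waker here
        event
    }

    fn main() {
        assert_eq!(classify(true, false, false, true), RxIrq::Error);
        assert_eq!(classify(false, true, true, false), RxIrq::Idle);
        assert_eq!(classify(false, false, false, true), RxIrq::RxNotEmpty);
        assert_eq!(classify(false, false, false, false), RxIrq::Ignored);
    }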
|  |  | ||||||
| @@ -1109,9 +1103,9 @@ pub use crate::usart::buffered::InterruptHandler as BufferedInterruptHandler; | |||||||
| mod buffered; | mod buffered; | ||||||
|  |  | ||||||
| #[cfg(not(gpdma))] | #[cfg(not(gpdma))] | ||||||
| mod rx_ringbuffered; | mod ringbuffered; | ||||||
| #[cfg(not(gpdma))] | #[cfg(not(gpdma))] | ||||||
| pub use rx_ringbuffered::RingBufferedUartRx; | pub use ringbuffered::RingBufferedUartRx; | ||||||
|  |  | ||||||
| use self::sealed::Kind; | use self::sealed::Kind; | ||||||
|  |  | ||||||
|   | |||||||
| @@ -2,13 +2,12 @@ use core::future::poll_fn; | |||||||
| use core::sync::atomic::{compiler_fence, Ordering}; | use core::sync::atomic::{compiler_fence, Ordering}; | ||||||
| use core::task::Poll; | use core::task::Poll; | ||||||
|  |  | ||||||
| use embassy_hal_common::drop::OnDrop; |  | ||||||
| use embassy_hal_common::PeripheralRef; | use embassy_hal_common::PeripheralRef; | ||||||
| use futures::future::{select, Either}; | use futures::future::{select, Either}; | ||||||
|  |  | ||||||
| use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx}; | use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx}; | ||||||
| use crate::dma::ringbuffer::OverrunError; |  | ||||||
| use crate::dma::RingBuffer; | use crate::dma::RingBuffer; | ||||||
|  | use crate::usart::{Regs, Sr}; | ||||||
|  |  | ||||||
| pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> { | pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> { | ||||||
|     _peri: PeripheralRef<'d, T>, |     _peri: PeripheralRef<'d, T>, | ||||||
| @@ -24,7 +23,9 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> { | |||||||
|  |  | ||||||
|         let request = self.rx_dma.request(); |         let request = self.rx_dma.request(); | ||||||
|         let opts = Default::default(); |         let opts = Default::default(); | ||||||
|  |  | ||||||
|         let ring_buf = unsafe { RingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) }; |         let ring_buf = unsafe { RingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) }; | ||||||
|  |  | ||||||
|         RingBufferedUartRx { |         RingBufferedUartRx { | ||||||
|             _peri: self._peri, |             _peri: self._peri, | ||||||
|             ring_buf, |             ring_buf, | ||||||
| @@ -42,11 +43,18 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD | |||||||
|         Ok(()) |         Ok(()) | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  |     fn stop(&mut self, err: Error) -> Result<usize, Error> { | ||||||
|  |         self.teardown_uart(); | ||||||
|  |  | ||||||
|  |         Err(err) | ||||||
|  |     } | ||||||
|  |  | ||||||
|     /// Start uart background receive |     /// Start uart background receive | ||||||
|     fn setup_uart(&mut self) { |     fn setup_uart(&mut self) { | ||||||
|         // fence before starting DMA. |         // fence before starting DMA. | ||||||
|         compiler_fence(Ordering::SeqCst); |         compiler_fence(Ordering::SeqCst); | ||||||
|  |  | ||||||
|  |         // start the dma controller | ||||||
|         self.ring_buf.start(); |         self.ring_buf.start(); | ||||||
|  |  | ||||||
|         let r = T::regs(); |         let r = T::regs(); | ||||||
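The new `stop` helper bundles "tear everything down, then surface the error" into one expression so the read path can bail out uniformly. The shape of the pattern, detached from the peripheral types (illustrative; the real `teardown_uart` also stops the DMA request and clears the UART flags):

    #[derive(Debug, PartialEq)]
    enum Error {
        Overrun,
    }

    struct Receiver {
        running: bool,
    }

    impl Receiver {
        fn teardown(&mut self) {
            // the real code disables interrupts, the DMA request and the ring buffer here
            self.running = false;
        }

        // Same shape as RingBufferedUartRx::stop: always tears down, then returns Err.
        fn stop(&mut self, err: Error) -> Result<usize, Error> {
            self.teardown();
            Err(err)
        }
    }

    fn main() {
        let mut rx = Receiver { running: true };
        assert_eq!(rx.stop(Error::Overrun), Err(Error::Overrun));
        assert!(!rx.running);
    }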
| @@ -58,8 +66,8 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD | |||||||
|                 w.set_rxneie(false); |                 w.set_rxneie(false); | ||||||
|                 // enable parity interrupt if not ParityNone |                 // enable parity interrupt if not ParityNone | ||||||
|                 w.set_peie(w.pce()); |                 w.set_peie(w.pce()); | ||||||
|                 // disable idle line interrupt |                 // enable idle line interrupt | ||||||
|                 w.set_idleie(false); |                 w.set_idleie(true); | ||||||
|             }); |             }); | ||||||
|             r.cr3().modify(|w| { |             r.cr3().modify(|w| { | ||||||
|                 // enable Error Interrupt: (Frame error, Noise error, Overrun error) |                 // enable Error Interrupt: (Frame error, Noise error, Overrun error) | ||||||
@@ -72,6 +80,8 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxDma> {
 
     /// Stop uart background receive
     fn teardown_uart(&mut self) {
+        self.ring_buf.request_stop();
+
         let r = T::regs();
         // clear all interrupts and DMA Rx Request
         // SAFETY: only clears Rx related flags
@@ -93,9 +103,6 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxDma> {
         }
 
         compiler_fence(Ordering::SeqCst);
-
-        self.ring_buf.request_stop();
-        while self.ring_buf.is_running() {}
     }
 
     /// Read bytes that are readily available in the ring buffer.
@@ -111,96 +118,49 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxDma> {
 
         // Start background receive if it was not already started
         // SAFETY: read only
-        let is_started = unsafe { r.cr3().read().dmar() };
-        if !is_started {
-            self.start()?;
-        }
+        match unsafe { r.cr3().read().dmar() } {
+            false => self.start()?,
+            _ => {}
+        };
 
-        // SAFETY: read only and we only use Rx related flags
-        let s = unsafe { sr(r).read() };
-        let has_errors = s.pe() || s.fe() || s.ne() || s.ore();
-        if has_errors {
-            self.teardown_uart();
-
-            if s.pe() {
-                return Err(Error::Parity);
-            } else if s.fe() {
-                return Err(Error::Framing);
-            } else if s.ne() {
-                return Err(Error::Noise);
-            } else {
-                return Err(Error::Overrun);
-            }
-        }
-
-        self.ring_buf.reload_position();
-        match self.ring_buf.read(buf) {
-            Ok(len) if len == 0 => {}
-            Ok(len) => {
-                assert!(len > 0);
-                return Ok(len);
-            }
-            Err(OverrunError) => {
-                // Stop any transfer from now on
-                // The user must re-start to receive any more data
-                self.teardown_uart();
-                return Err(Error::Overrun);
-            }
-        }
+        check_for_errors(clear_idle_flag(T::regs()))?;
 
         loop {
-            self.wait_for_data_or_idle().await?;
+            match self.ring_buf.read(buf) {
+                Ok((0, _)) => {}
+                Ok((len, _)) => {
+                    return Ok(len);
+                }
+                Err(_) => {
+                    return self.stop(Error::Overrun);
+                }
+            }
 
-            self.ring_buf.reload_position();
-            if !self.ring_buf.is_empty() {
-                break;
+            match self.wait_for_data_or_idle().await {
+                Ok(_) => {}
+                Err(err) => {
+                    return self.stop(err);
+                }
             }
         }
-
-        let len = self.ring_buf.read(buf).map_err(|_err| Error::Overrun)?;
-        assert!(len > 0);
-
-        Ok(len)
     }
 
     /// Wait for uart idle or dma half-full or full
     async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> {
-        let r = T::regs();
-
-        // make sure USART state is restored to neutral state
-        let _on_drop = OnDrop::new(move || {
-            // SAFETY: only clears Rx related flags
-            unsafe {
-                r.cr1().modify(|w| {
-                    // disable idle line interrupt
-                    w.set_idleie(false);
-                });
-            }
-        });
-
-        // SAFETY: only sets Rx related flags
-        unsafe {
-            r.cr1().modify(|w| {
-                // enable idle line interrupt
-                w.set_idleie(true);
-            });
-        }
-
         compiler_fence(Ordering::SeqCst);
 
+        let mut dma_init = false;
         // Future which completes when there is dma is half full or full
         let dma = poll_fn(|cx| {
             self.ring_buf.set_waker(cx.waker());
 
-            compiler_fence(Ordering::SeqCst);
+            let status = match dma_init {
+                false => Poll::Pending,
+                true => Poll::Ready(()),
+            };
 
-            self.ring_buf.reload_position();
-            if !self.ring_buf.is_empty() {
-                // Some data is now available
-                Poll::Ready(())
-            } else {
-                Poll::Pending
-            }
+            dma_init = true;
+            status
        });
 
         // Future which completes when idle line is detected
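Aside: the dma future above no longer inspects the ring buffer inside poll(); it registers the ring buffer's waker and completes on the second poll, i.e. only after a DMA half/complete-transfer interrupt has woken the task. A host-side illustration of that "pending once, ready after wake" shape (not code from this commit; `register_waker` is a stand-in for `ring_buf.set_waker`):

use core::task::{Poll, Waker};
use futures::future::poll_fn;

// Completes on the first poll after the task has been woken by whoever
// holds the registered waker (in the driver, the DMA interrupt handler).
async fn wait_for_wake(register_waker: impl Fn(&Waker)) {
    let mut polled_once = false;
    poll_fn(|cx| {
        // Re-register on every poll so a later wake-up still reaches us.
        register_waker(cx.waker());
        if polled_once {
            Poll::Ready(())
        } else {
            polled_once = true;
            Poll::Pending
        }
    })
    .await
}

The future completes after any wake-up of the registered waker; the read() loop above re-checks the ring buffer afterwards, so a spurious wake only costs an extra pass.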
@@ -210,28 +170,11 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxDma> {
 
             compiler_fence(Ordering::SeqCst);
 
-            // SAFETY: read only and we only use Rx related flags
-            let sr = unsafe { sr(r).read() };
+            // Critical section is needed so that IDLE isn't set after
+            // our read but before we clear it.
+            let sr = critical_section::with(|_| clear_idle_flag(T::regs()));
 
-            // SAFETY: only clears Rx related flags
-            unsafe {
-                // This read also clears the error and idle interrupt flags on v1.
-                rdr(r).read_volatile();
-                clear_interrupt_flags(r, sr);
-            }
-
-            let has_errors = sr.pe() || sr.fe() || sr.ne() || sr.ore();
-            if has_errors {
-                if sr.pe() {
-                    return Poll::Ready(Err(Error::Parity));
-                } else if sr.fe() {
-                    return Poll::Ready(Err(Error::Framing));
-                } else if sr.ne() {
-                    return Poll::Ready(Err(Error::Noise));
-                } else {
-                    return Poll::Ready(Err(Error::Overrun));
-                }
-            }
+            check_for_errors(sr)?;
 
             if sr.idle() {
                 // Idle line is detected
@@ -243,11 +186,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxDma> {
 
         match select(dma, uart).await {
             Either::Left(((), _)) => Ok(()),
-            Either::Right((Ok(()), _)) => Ok(()),
-            Either::Right((Err(e), _)) => {
-                self.teardown_uart();
-                Err(e)
-            }
+            Either::Right((result, _)) => result,
         }
     }
 }
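Aside: the two futures are raced with futures::future::select, and the Either result identifies which side finished first. A host-side sketch of that pattern (illustration only; ready() futures stand in for the DMA and idle-line futures):

use futures::executor::block_on;
use futures::future::{ready, select, Either};

fn main() {
    // Two already-completed futures stand in for the DMA-progress and
    // idle-line futures; select() yields whichever finishes first,
    // together with the still-pending other future.
    let dma = ready(());
    let uart = ready(Ok::<(), &str>(()));

    let outcome: Result<(), &str> = block_on(async {
        match select(dma, uart).await {
            Either::Left(((), _uart)) => Ok(()),     // data arrived via DMA
            Either::Right((result, _dma)) => result, // idle line or UART error
        }
    });
    assert!(outcome.is_ok());
}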
@@ -257,6 +196,37 @@ impl<T: BasicInstance, RxDma: super::RxDma<T>> Drop for RingBufferedUartRx<'_, T, RxDma> {
         self.teardown_uart();
     }
 }
+/// Return an error result if the Sr register has errors
+fn check_for_errors(s: Sr) -> Result<(), Error> {
+    if s.pe() {
+        Err(Error::Parity)
+    } else if s.fe() {
+        Err(Error::Framing)
+    } else if s.ne() {
+        Err(Error::Noise)
+    } else if s.ore() {
+        Err(Error::Overrun)
+    } else {
+        Ok(())
+    }
+}
+
+/// Clear IDLE and return the Sr register
+fn clear_idle_flag(r: Regs) -> Sr {
+    unsafe {
+        // SAFETY: read only and we only use Rx related flags
+
+        let sr = sr(r).read();
+
+        // This read also clears the error and idle interrupt flags on v1.
+        rdr(r).read_volatile();
+        clear_interrupt_flags(r, sr);
+
+        r.cr1().modify(|w| w.set_idleie(true));
+
+        sr
+    }
+}
 
 #[cfg(all(feature = "unstable-traits", feature = "nightly"))]
 mod eio {