Run rustfmt.

Dario Nieuwenhuis
2022-06-12 22:15:44 +02:00
parent 6199bdea71
commit a8703b7598
340 changed files with 1326 additions and 3020 deletions
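
The hunks below consistently join formerly wrapped expressions onto single longer lines and regroup imports into std/core, external-crate, and crate-local blocks, which is the effect of a wider `max_width` combined with rustfmt's unstable import options. A configuration along these lines would produce this style (a sketch only; the exact values, and whether the repository ships such a rustfmt.toml, are assumptions rather than part of the commit):

    # rustfmt.toml (hypothetical) - the two import options need nightly rustfmt
    max_width = 120                       # assumed from the length of the joined lines
    group_imports = "StdExternalCrate"    # std/core first, then external crates, then crate/self/super
    imports_granularity = "Module"        # merge imports from the same module into one `use`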


@@ -2,9 +2,10 @@
 pub mod raw;
 
-use self::raw::RawMutex;
 use core::cell::UnsafeCell;
 
+use self::raw::RawMutex;
+
 /// Any object implementing this trait guarantees exclusive access to the data contained
 /// within the mutex for the duration of the lock.
 /// Adapted from <https://github.com/rust-embedded/mutex-trait>.
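
For context on the `RawMutex` trait whose file is touched above: it is a closure-based blocking lock. A minimal usage sketch of the wrapper built on it (assuming an `embassy::blocking_mutex::Mutex` type with a const `new` and a `lock(closure)` method; paths and signatures are illustrative, not quoted from this commit):

    use core::cell::RefCell;

    use embassy::blocking_mutex::raw::CriticalSectionRawMutex;
    use embassy::blocking_mutex::Mutex;

    // A counter shared between thread mode and interrupts, protected by a critical section.
    static COUNTER: Mutex<CriticalSectionRawMutex, RefCell<u32>> = Mutex::new(RefCell::new(0));

    fn bump() -> u32 {
        // `lock` gives the closure exclusive access for its whole duration and returns its result.
        COUNTER.lock(|cell| {
            let mut n = cell.borrow_mut();
            *n += 1;
            *n
        })
    }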


@@ -14,9 +14,7 @@ unsafe impl Sync for CriticalSectionRawMutex {}
 
 impl CriticalSectionRawMutex {
     pub const fn new() -> Self {
-        Self {
-            _phantom: PhantomData,
-        }
+        Self { _phantom: PhantomData }
     }
 }
@@ -38,9 +36,7 @@ unsafe impl Send for NoopRawMutex {}
 
 impl NoopRawMutex {
     pub const fn new() -> Self {
-        Self {
-            _phantom: PhantomData,
-        }
+        Self { _phantom: PhantomData }
     }
 }
@@ -66,19 +62,14 @@ mod thread_mode {
     impl ThreadModeRawMutex {
         pub const fn new() -> Self {
-            Self {
-                _phantom: PhantomData,
-            }
+            Self { _phantom: PhantomData }
         }
     }
 
     impl RawMutex for ThreadModeRawMutex {
         const INIT: Self = Self::new();
 
         fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
-            assert!(
-                in_thread_mode(),
-                "ThreadModeMutex can only be locked from thread mode."
-            );
+            assert!(in_thread_mode(), "ThreadModeMutex can only be locked from thread mode.");
             f()
         }
@@ -104,8 +95,7 @@ mod thread_mode {
         return Some("main") == std::thread::current().name();
 
         #[cfg(not(feature = "std"))]
-        return cortex_m::peripheral::SCB::vect_active()
-            == cortex_m::peripheral::scb::VectActive::ThreadMode;
+        return cortex_m::peripheral::SCB::vect_active() == cortex_m::peripheral::scb::VectActive::ThreadMode;
     }
 }
 
 #[cfg(any(cortex_m, feature = "std"))]


@@ -20,8 +20,7 @@
 use core::cell::RefCell;
 use core::pin::Pin;
-use core::task::Context;
-use core::task::Poll;
+use core::task::{Context, Poll};
 
 use futures::Future;
 use heapless::Deque;
@@ -44,9 +43,7 @@ where
     M: RawMutex,
 {
     fn clone(&self) -> Self {
-        Sender {
-            channel: self.channel,
-        }
+        Sender { channel: self.channel }
     }
 }
@@ -77,9 +74,7 @@ pub struct DynamicSender<'ch, T> {
 
 impl<'ch, T> Clone for DynamicSender<'ch, T> {
     fn clone(&self) -> Self {
-        DynamicSender {
-            channel: self.channel,
-        }
+        DynamicSender { channel: self.channel }
     }
 }
@@ -125,9 +120,7 @@ where
     M: RawMutex,
 {
     fn clone(&self) -> Self {
-        Receiver {
-            channel: self.channel,
-        }
+        Receiver { channel: self.channel }
     }
 }
@@ -158,9 +151,7 @@ pub struct DynamicReceiver<'ch, T> {
 
 impl<'ch, T> Clone for DynamicReceiver<'ch, T> {
     fn clone(&self) -> Self {
-        DynamicReceiver {
-            channel: self.channel,
-        }
+        DynamicReceiver { channel: self.channel }
     }
 }
@@ -169,9 +160,7 @@ impl<'ch, T> DynamicReceiver<'ch, T> {
     ///
     /// See [`Channel::recv()`].
     pub fn recv(&self) -> DynamicRecvFuture<'_, T> {
-        DynamicRecvFuture {
-            channel: self.channel,
-        }
+        DynamicRecvFuture { channel: self.channel }
     }
 
     /// Attempt to immediately receive the next value.
@@ -282,11 +271,7 @@ impl<'ch, T> Future for DynamicSendFuture<'ch, T> {
 impl<'ch, T> Unpin for DynamicSendFuture<'ch, T> {}
 
 trait DynamicChannel<T> {
-    fn try_send_with_context(
-        &self,
-        message: T,
-        cx: Option<&mut Context<'_>>,
-    ) -> Result<(), TrySendError<T>>;
+    fn try_send_with_context(&self, message: T, cx: Option<&mut Context<'_>>) -> Result<(), TrySendError<T>>;
     fn try_recv_with_context(&self, cx: Option<&mut Context<'_>>) -> Result<T, TryRecvError>;
 }
@@ -346,11 +331,7 @@ impl<T, const N: usize> ChannelState<T, N> {
         self.try_send_with_context(message, None)
     }
 
-    fn try_send_with_context(
-        &mut self,
-        message: T,
-        cx: Option<&mut Context<'_>>,
-    ) -> Result<(), TrySendError<T>> {
+    fn try_send_with_context(&mut self, message: T, cx: Option<&mut Context<'_>>) -> Result<(), TrySendError<T>> {
         match self.queue.push_back(message) {
             Ok(()) => {
                 self.receiver_waker.wake();
@@ -425,11 +406,7 @@ where
         self.lock(|c| c.try_recv_with_context(cx))
     }
 
-    fn try_send_with_context(
-        &self,
-        m: T,
-        cx: Option<&mut Context<'_>>,
-    ) -> Result<(), TrySendError<T>> {
+    fn try_send_with_context(&self, m: T, cx: Option<&mut Context<'_>>) -> Result<(), TrySendError<T>> {
         self.lock(|c| c.try_send_with_context(m, cx))
     }
@@ -491,11 +468,7 @@ impl<M, T, const N: usize> DynamicChannel<T> for Channel<M, T, N>
 where
     M: RawMutex,
 {
-    fn try_send_with_context(
-        &self,
-        m: T,
-        cx: Option<&mut Context<'_>>,
-    ) -> Result<(), TrySendError<T>> {
+    fn try_send_with_context(&self, m: T, cx: Option<&mut Context<'_>>) -> Result<(), TrySendError<T>> {
         Channel::try_send_with_context(self, m, cx)
     }
@@ -512,11 +485,10 @@ mod tests {
     use futures_executor::ThreadPool;
     use futures_timer::Delay;
 
+    use super::*;
     use crate::blocking_mutex::raw::{CriticalSectionRawMutex, NoopRawMutex};
     use crate::util::Forever;
-    use super::*;
 
     fn capacity<T, const N: usize>(c: &ChannelState<T, N>) -> usize {
         c.queue.capacity() - c.queue.len()
     }
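
For orientation, the channel being reformatted above is a bounded multi-producer multi-consumer queue. A small usage sketch (assuming the module path `embassy::channel::mpmc` and a const `Channel::new`; the example is illustrative, not taken from the commit):

    use embassy::blocking_mutex::raw::CriticalSectionRawMutex;
    use embassy::channel::mpmc::Channel;

    // A bounded queue of at most 4 messages, shareable between tasks and interrupt handlers.
    static EVENTS: Channel<CriticalSectionRawMutex, u32, 4> = Channel::new();

    async fn producer() {
        // `send` waits while the queue is full; `try_send` would return an error instead.
        EVENTS.send(17).await;
    }

    async fn consumer() -> u32 {
        // `recv` waits until a message is available (the `Channel::recv()` referenced above).
        EVENTS.recv().await
    }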


@@ -1,11 +1,11 @@
 use core::marker::PhantomData;
 
 use js_sys::Promise;
 use wasm_bindgen::prelude::*;
 
-use super::{
-    raw::{self, util::UninitCell},
-    Spawner,
-};
+use super::raw::util::UninitCell;
+use super::raw::{self};
+use super::Spawner;
 
 /// WASM executor, wasm_bindgen to schedule tasks on the JS event loop.
 pub struct Executor {


@@ -13,25 +13,25 @@ mod timer_queue;
 pub(crate) mod util;
 mod waker;
 
-use atomic_polyfill::{AtomicU32, Ordering};
 use core::cell::Cell;
 use core::future::Future;
 use core::pin::Pin;
 use core::ptr::NonNull;
 use core::task::{Context, Poll};
 use core::{mem, ptr};
+
+use atomic_polyfill::{AtomicU32, Ordering};
 use critical_section::CriticalSection;
 
 use self::run_queue::{RunQueue, RunQueueItem};
 use self::util::UninitCell;
+pub use self::waker::task_from_waker;
 use super::SpawnToken;
 #[cfg(feature = "time")]
 use crate::time::driver::{self, AlarmHandle};
 #[cfg(feature = "time")]
 use crate::time::Instant;
-pub use self::waker::task_from_waker;
 
 /// Task is spawned (has a future)
 pub(crate) const STATE_SPAWNED: u32 = 1 << 0;
 /// Task is in the executor run queue
@@ -97,8 +97,7 @@ impl TaskHeader {
         }
 
         // Mark it as scheduled
-        self.state
-            .store(state | STATE_RUN_QUEUED, Ordering::Relaxed);
+        self.state.store(state | STATE_RUN_QUEUED, Ordering::Relaxed);
 
         // We have just marked the task as scheduled, so enqueue it.
         let executor = &*self.executor.get();

@@ -1,6 +1,7 @@
-use atomic_polyfill::{AtomicPtr, Ordering};
 use core::ptr;
 use core::ptr::NonNull;
+
+use atomic_polyfill::{AtomicPtr, Ordering};
 use critical_section::CriticalSection;
 
 use super::TaskHeader;
@@ -63,10 +64,7 @@ impl RunQueue {
         while let Some(task) = NonNull::new(ptr) {
             // If the task re-enqueues itself, the `next` pointer will get overwritten.
             // Therefore, first read the next pointer, and only then process the task.
-            let next = unsafe { task.as_ref() }
-                .run_queue_item
-                .next
-                .load(Ordering::Relaxed);
+            let next = unsafe { task.as_ref() }.run_queue_item.next.load(Ordering::Relaxed);
 
             on_task(task);


@@ -1,9 +1,10 @@
-use atomic_polyfill::Ordering;
 use core::cell::Cell;
 use core::cmp::min;
 use core::ptr;
 use core::ptr::NonNull;
+
+use atomic_polyfill::Ordering;
 
 use super::{TaskHeader, STATE_TIMER_QUEUED};
 use crate::time::Instant;
@@ -54,11 +55,7 @@ impl TimerQueue {
         res
     }
 
-    pub(crate) unsafe fn dequeue_expired(
-        &self,
-        now: Instant,
-        on_task: impl Fn(NonNull<TaskHeader>),
-    ) {
+    pub(crate) unsafe fn dequeue_expired(&self, now: Instant, on_task: impl Fn(NonNull<TaskHeader>)) {
         self.retain(|p| {
             let task = p.as_ref();
             if task.expires_at.get() <= now {


@@ -2,6 +2,7 @@ use core::marker::PhantomData;
 use core::mem;
 use core::ptr::NonNull;
 use core::task::Poll;
+
 use futures::future::poll_fn;
 
 use super::raw;


@@ -1,8 +1,5 @@
 #![cfg_attr(not(any(feature = "std", feature = "wasm")), no_std)]
-#![cfg_attr(
-    feature = "nightly",
-    feature(generic_associated_types, type_alias_impl_trait)
-)]
+#![cfg_attr(feature = "nightly", feature(generic_associated_types, type_alias_impl_trait))]
 #![allow(clippy::new_without_default)]
 
 // This mod MUST go first, so that the others see its macros.


@@ -7,6 +7,7 @@
 use core::cell::{RefCell, UnsafeCell};
 use core::ops::{Deref, DerefMut};
 use core::task::Poll;
+
 use futures::future::poll_fn;
 
 use crate::blocking_mutex::raw::RawMutex;


@@ -56,9 +56,10 @@ cfg_if::cfg_if! {
 }
 
 mod eh02 {
-    use super::*;
     use embedded_hal_02::blocking::delay::{DelayMs, DelayUs};
 
+    use super::*;
+
     impl DelayMs<u8> for Delay {
         fn delay_ms(&mut self, ms: u8) {
             block_for(Duration::from_millis(ms as u64))


@@ -1,11 +1,10 @@
-use atomic_polyfill::{AtomicU8, Ordering};
 use std::cell::UnsafeCell;
-use std::mem;
 use std::mem::MaybeUninit;
 use std::sync::{Condvar, Mutex, Once};
-use std::time::Duration as StdDuration;
-use std::time::Instant as StdInstant;
-use std::{ptr, thread};
+use std::time::{Duration as StdDuration, Instant as StdInstant};
+use std::{mem, ptr, thread};
+
+use atomic_polyfill::{AtomicU8, Ordering};
 
 use crate::time::driver::{AlarmHandle, Driver};
@@ -106,15 +105,13 @@ impl Driver for TimeDriver {
     }
 
     unsafe fn allocate_alarm(&self) -> Option<AlarmHandle> {
-        let id = self
-            .alarm_count
-            .fetch_update(Ordering::AcqRel, Ordering::Acquire, |x| {
-                if x < ALARM_COUNT as u8 {
-                    Some(x + 1)
-                } else {
-                    None
-                }
-            });
+        let id = self.alarm_count.fetch_update(Ordering::AcqRel, Ordering::Acquire, |x| {
+            if x < ALARM_COUNT as u8 {
+                Some(x + 1)
+            } else {
+                None
+            }
+        });
 
         match id {
             Ok(id) => Some(AlarmHandle::new(id)),


@@ -1,8 +1,9 @@
-use atomic_polyfill::{AtomicU8, Ordering};
 use std::cell::UnsafeCell;
 use std::mem::MaybeUninit;
 use std::ptr;
 use std::sync::{Mutex, Once};
+
+use atomic_polyfill::{AtomicU8, Ordering};
 use wasm_bindgen::prelude::*;
 use wasm_timer::Instant as StdInstant;
@@ -66,15 +67,13 @@ impl Driver for TimeDriver {
     }
 
     unsafe fn allocate_alarm(&self) -> Option<AlarmHandle> {
-        let id = self
-            .alarm_count
-            .fetch_update(Ordering::AcqRel, Ordering::Acquire, |x| {
-                if x < ALARM_COUNT as u8 {
-                    Some(x + 1)
-                } else {
-                    None
-                }
-            });
+        let id = self.alarm_count.fetch_update(Ordering::AcqRel, Ordering::Acquire, |x| {
+            if x < ALARM_COUNT as u8 {
+                Some(x + 1)
+            } else {
+                None
+            }
+        });
 
         match id {
             Ok(id) => Some(AlarmHandle::new(id)),


@@ -65,30 +65,22 @@ impl Duration {
 
     /// Adds one Duration to another, returning a new Duration or None in the event of an overflow.
     pub fn checked_add(self, rhs: Duration) -> Option<Duration> {
-        self.ticks
-            .checked_add(rhs.ticks)
-            .map(|ticks| Duration { ticks })
+        self.ticks.checked_add(rhs.ticks).map(|ticks| Duration { ticks })
     }
 
     /// Subtracts one Duration to another, returning a new Duration or None in the event of an overflow.
     pub fn checked_sub(self, rhs: Duration) -> Option<Duration> {
-        self.ticks
-            .checked_sub(rhs.ticks)
-            .map(|ticks| Duration { ticks })
+        self.ticks.checked_sub(rhs.ticks).map(|ticks| Duration { ticks })
     }
 
     /// Multiplies one Duration by a scalar u32, returning a new Duration or None in the event of an overflow.
    pub fn checked_mul(self, rhs: u32) -> Option<Duration> {
-        self.ticks
-            .checked_mul(rhs as _)
-            .map(|ticks| Duration { ticks })
+        self.ticks.checked_mul(rhs as _).map(|ticks| Duration { ticks })
     }
 
     /// Divides one Duration a scalar u32, returning a new Duration or None in the event of an overflow.
     pub fn checked_div(self, rhs: u32) -> Option<Duration> {
-        self.ticks
-            .checked_div(rhs as _)
-            .map(|ticks| Duration { ticks })
+        self.ticks.checked_div(rhs as _).map(|ticks| Duration { ticks })
     }
 }
@@ -96,8 +88,7 @@ impl Add for Duration {
     type Output = Duration;
 
     fn add(self, rhs: Duration) -> Duration {
-        self.checked_add(rhs)
-            .expect("overflow when adding durations")
+        self.checked_add(rhs).expect("overflow when adding durations")
     }
 }
@@ -111,8 +102,7 @@ impl Sub for Duration {
     type Output = Duration;
 
     fn sub(self, rhs: Duration) -> Duration {
-        self.checked_sub(rhs)
-            .expect("overflow when subtracting durations")
+        self.checked_sub(rhs).expect("overflow when subtracting durations")
     }
 }
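
The behaviour the reformatting above preserves: the `checked_*` methods report overflow by returning `None`, while the `Add`/`Sub` operator impls panic. A short sketch (assuming `Duration::from_secs` and `Duration::from_ticks` constructors as they exist in this tree):

    use embassy::time::Duration;

    fn duration_arithmetic() {
        let a = Duration::from_secs(2);
        let b = Duration::from_secs(3);

        // checked_add turns overflow into None instead of panicking.
        assert!(a.checked_add(b) == Some(Duration::from_secs(5)));
        assert!(Duration::from_ticks(u64::MAX).checked_add(b).is_none());

        // The operator form panics with "overflow when adding durations" on overflow.
        let _sum = a + b;
    }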


@@ -18,9 +18,7 @@ impl Instant {
 
     /// Returns an Instant representing the current time.
     pub fn now() -> Instant {
-        Instant {
-            ticks: driver::now(),
-        }
+        Instant { ticks: driver::now() }
     }
 
     /// Create an Instant from a tick count since system boot.
@@ -107,16 +105,12 @@ impl Instant {
 
     /// Adds one Duration to self, returning a new `Instant` or None in the event of an overflow.
     pub fn checked_add(&self, duration: Duration) -> Option<Instant> {
-        self.ticks
-            .checked_add(duration.ticks)
-            .map(|ticks| Instant { ticks })
+        self.ticks.checked_add(duration.ticks).map(|ticks| Instant { ticks })
     }
 
     /// Subtracts one Duration to self, returning a new `Instant` or None in the event of an overflow.
     pub fn checked_sub(&self, duration: Duration) -> Option<Instant> {
-        self.ticks
-            .checked_sub(duration.ticks)
-            .map(|ticks| Instant { ticks })
+        self.ticks.checked_sub(duration.ticks).map(|ticks| Instant { ticks })
     }
 }


@@ -1,7 +1,9 @@
 use core::future::Future;
 use core::pin::Pin;
 use core::task::{Context, Poll};
-use futures::{future::select, future::Either, pin_mut, Stream};
+
+use futures::future::{select, Either};
+use futures::{pin_mut, Stream};
 
 use crate::executor::raw;
 use crate::time::{Duration, Instant};
@@ -128,10 +130,7 @@ impl Ticker {
     /// Creates a new ticker that ticks at the specified duration interval.
     pub fn every(duration: Duration) -> Self {
         let expires_at = Instant::now() + duration;
-        Self {
-            expires_at,
-            duration,
-        }
+        Self { expires_at, duration }
     }
 }
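
Since this file keeps its `futures::Stream` import (see the import hunk above), a `Ticker` is consumed as a stream. A periodic-task sketch (the `#[embassy::task]` attribute and the exact paths are assumptions for illustration):

    use embassy::time::{Duration, Ticker};
    use futures::StreamExt; // provides `next()` for the Stream impl

    #[embassy::task]
    async fn heartbeat() {
        // Ticks at a fixed 500 ms period, measured from the previous deadline.
        let mut ticker = Ticker::every(Duration::from_millis(500));
        loop {
            ticker.next().await;
            // toggle an LED, kick a watchdog, etc.
        }
    }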


@@ -1,7 +1,8 @@
-use atomic_polyfill::{compiler_fence, AtomicPtr, Ordering};
 use core::ptr::{self, NonNull};
 use core::task::Waker;
+
+use atomic_polyfill::{compiler_fence, AtomicPtr, Ordering};
 
 use crate::executor::raw::{task_from_waker, wake_task, TaskHeader};
 
 /// Utility struct to register and wake a waker.