use core::ptr;
use core::ptr::NonNull;

use atomic_polyfill::{AtomicPtr, Ordering};
use critical_section::CriticalSection;

use super::{TaskHeader, TaskRef};

/// Queue link embedded in each task's `TaskHeader`: it holds the pointer to the next task in
/// the run queue.
pub(crate) struct RunQueueItem {
    next: AtomicPtr<TaskHeader>,
}

impl RunQueueItem {
    pub const fn new() -> Self {
        Self {
            next: AtomicPtr::new(ptr::null_mut()),
        }
    }
}

/// Atomic task queue using a very, very simple lock-free linked-list queue:
///
/// To enqueue a task, task.next is set to the old head, and head is atomically set to task.
///
/// Dequeuing is done in batches: the queue is emptied by atomically replacing head with
/// null. Then the batch is iterated following the next pointers until null is reached.
///
/// Note that batches will be iterated in the reverse order from that in which they were
/// enqueued. This is OK for our purposes: it can't create fairness problems, since the next
/// batch won't run until the current batch is completely processed, so even a task that
/// instantly re-enqueues itself (for example, by waking its own waker) can't prevent other
/// tasks from running.
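///
/// For example (illustrative): enqueueing tasks A, B and C in that order builds the internal
/// list `head -> C -> B -> A`, so `dequeue_all` processes C, then B, then A.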
pub(crate) struct RunQueue {
    head: AtomicPtr<TaskHeader>,
}

impl RunQueue {
    pub const fn new() -> Self {
        Self {
            head: AtomicPtr::new(ptr::null_mut()),
        }
    }

    /// Enqueues a task. Returns `true` if the queue was empty.
    ///
    /// # Safety
    ///
    /// `task` must NOT already be enqueued in any queue.
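    ///
    /// A sketch of a typical call site (illustrative only; the real caller and its
    /// wake-the-executor logic live elsewhere in the executor):
    ///
    /// ```ignore
    /// critical_section::with(|cs| {
    ///     if unsafe { run_queue.enqueue(cs, task) } {
    ///         // The queue went from empty to non-empty: notify the executor to poll.
    ///     }
    /// });
    /// ```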
    #[inline(always)]
    pub(crate) unsafe fn enqueue(&self, _cs: CriticalSection, task: TaskRef) -> bool {
        // Push the task onto the front of the list: link it to the old head, then
        // publish it as the new head.
        let prev = self.head.load(Ordering::Relaxed);
        task.header().run_queue_item.next.store(prev, Ordering::Relaxed);
        self.head.store(task.as_ptr() as _, Ordering::Relaxed);
        prev.is_null()
    }

    /// Empty the queue, then call `on_task` for each task that was in the queue.
    /// NOTE: It is OK for `on_task` to enqueue more tasks. In this case they're left in the queue
    /// and will be processed by the *next* call to `dequeue_all`, *not* the current one.
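    ///
    /// Sketch of a drain loop as the executor might run it (illustrative; the closure
    /// body is up to the caller):
    ///
    /// ```ignore
    /// run_queue.dequeue_all(|task| {
    ///     // Poll the task here. If polling wakes the task again, it is re-enqueued
    ///     // and will be seen by the *next* `dequeue_all`, not this one.
    /// });
    /// ```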
    pub(crate) fn dequeue_all(&self, on_task: impl Fn(TaskRef)) {
        // Atomically empty the queue.
        let mut ptr = self.head.swap(ptr::null_mut(), Ordering::AcqRel);

        // Iterate the linked list of tasks that were previously in the queue.
        while let Some(task) = NonNull::new(ptr) {
            let task = unsafe { TaskRef::from_ptr(task.as_ptr()) };

            // If the task re-enqueues itself, the `next` pointer will get overwritten.
            // Therefore, first read the next pointer, and only then process the task.
            let next = task.header().run_queue_item.next.load(Ordering::Relaxed);

            on_task(task);

            ptr = next;
        }
    }
}
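
// What follows is a minimal, self-contained sketch of the same push/batch-drain
// pattern on a standalone node type, so the LIFO batch behavior can be seen in
// isolation. `Node`, `Stack` and the test are hypothetical (not part of this
// module) and assume `std` is available when running tests.
#[cfg(test)]
mod run_queue_sketch {
    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    struct Node {
        value: usize,
        next: AtomicPtr<Node>,
    }

    impl Node {
        fn new(value: usize) -> Self {
            Self {
                value,
                next: AtomicPtr::new(ptr::null_mut()),
            }
        }
    }

    struct Stack {
        head: AtomicPtr<Node>,
    }

    impl Stack {
        const fn new() -> Self {
            Self {
                head: AtomicPtr::new(ptr::null_mut()),
            }
        }

        // Analogous to `RunQueue::enqueue`: link the node to the old head, publish it
        // as the new head, and report whether the stack was empty before the push.
        fn push(&self, node: *mut Node) -> bool {
            let prev = self.head.load(Ordering::Relaxed);
            unsafe { (*node).next.store(prev, Ordering::Relaxed) };
            self.head.store(node, Ordering::Relaxed);
            prev.is_null()
        }

        // Analogous to `RunQueue::dequeue_all`: atomically take the whole batch,
        // then walk it following the `next` pointers.
        fn drain(&self, mut f: impl FnMut(usize)) {
            let mut ptr = self.head.swap(ptr::null_mut(), Ordering::AcqRel);
            while !ptr.is_null() {
                let node = unsafe { &*ptr };
                // Read `next` before calling `f`, in case `f` pushes the node again.
                let next = node.next.load(Ordering::Relaxed);
                f(node.value);
                ptr = next;
            }
        }
    }

    #[test]
    fn drains_batch_in_reverse_push_order() {
        let stack = Stack::new();
        let nodes = [
            Box::into_raw(Box::new(Node::new(1))),
            Box::into_raw(Box::new(Node::new(2))),
            Box::into_raw(Box::new(Node::new(3))),
        ];

        assert!(stack.push(nodes[0])); // the stack was empty before this push
        assert!(!stack.push(nodes[1]));
        assert!(!stack.push(nodes[2]));

        let mut seen = Vec::new();
        stack.drain(|v| seen.push(v));
        assert_eq!(seen, [3, 2, 1]); // reverse of the push order

        // Free the heap-allocated nodes.
        for n in nodes {
            unsafe { drop(Box::from_raw(n)) };
        }
    }
}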