//! This module has containers for storing the tasks spawned on a scheduler. The
//! `OwnedTasks` container is thread-safe but can only store tasks that
//! implement `Send`. The `LocalOwnedTasks` container is not thread-safe, but
//! can store non-`Send` tasks.
//!
//! The collections can be closed to prevent adding new tasks while the
//! scheduler that owns the collection is shutting down.
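//!
//! A rough sketch of how a scheduler drives these containers (illustrative
//! only: it glosses over the exact internal signatures and is not a compiled
//! doctest):
//!
//! ```ignore
//! // Spawning: bind the future to the owned collection.
//! let (join, notified) = owned.bind(future, scheduler, id);
//! if let Some(notified) = notified {
//!     // Proving ownership grants this thread permission to poll the task.
//!     let local = owned.assert_owner(notified);
//!     local.run();
//! }
//!
//! // Shutdown: close the collection and shut down all remaining tasks.
//! owned.close_and_shutdown_all(0);
//! ```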
use crate::future::Future;
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::{AtomicBool, Ordering};
use crate::runtime::task::{JoinHandle, LocalNotified, Notified, Schedule, Task};
use crate::util::linked_list::{Link, LinkedList};
use crate::util::sharded_list;

use std::marker::PhantomData;
use std::num::NonZeroU64;
// The id from the module below is used to verify whether a given task is
// stored in this OwnedTasks, or in some other list. The counter starts at one
// so we can use `None` for tasks not owned by any list.
//
// The safety checks in this file can technically be violated if the counter
// overflows, but the checks are not supposed to ever fail unless there is a
// bug in Tokio, so we accept that certain bugs would not be caught if two
// mixed-up runtimes happen to have the same id.
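//
// Concretely, `get_owner_id() == None` marks a task that is not in any list,
// while a mismatched `Some(id)` indicates a task bound to a different
// `OwnedTasks`/`LocalOwnedTasks`.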
cfg_has_atomic_u64! {
use std::sync::atomic::AtomicU64;
static NEXT_OWNED_TASKS_ID: AtomicU64 = AtomicU64::new(1);
fn get_next_id() -> NonZeroU64 {
loop {
let id = NEXT_OWNED_TASKS_ID.fetch_add(1, Ordering::Relaxed);
if let Some(id) = NonZeroU64::new(id) {
return id;
}
}
}
}
cfg_not_has_atomic_u64! {
use std::sync::atomic::AtomicU32;
static NEXT_OWNED_TASKS_ID: AtomicU32 = AtomicU32::new(1);
fn get_next_id() -> NonZeroU64 {
loop {
let id = NEXT_OWNED_TASKS_ID.fetch_add(1, Ordering::Relaxed);
if let Some(id) = NonZeroU64::new(u64::from(id)) {
return id;
}
}
}
}
pub(crate) struct OwnedTasks<S: 'static> {
list: List<S>,
pub(crate) id: NonZeroU64,
closed: AtomicBool,
}
type List<S> = sharded_list::ShardedList<Task<S>, <Task<S> as Link>::Target>;
pub(crate) struct LocalOwnedTasks<S: 'static> {
inner: UnsafeCell<OwnedTasksInner<S>>,
pub(crate) id: NonZeroU64,
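    // `*const ()` is neither `Send` nor `Sync`, so this marker keeps the
    // whole struct pinned to a single thread.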
_not_send_or_sync: PhantomData<*const ()>,
}
struct OwnedTasksInner<S: 'static> {
list: LinkedList<Task<S>, <Task<S> as Link>::Target>,
closed: bool,
}
impl<S: 'static> OwnedTasks<S> {
pub(crate) fn new(num_cores: usize) -> Self {
let shard_size = Self::gen_shared_list_size(num_cores);
Self {
list: List::new(shard_size),
closed: AtomicBool::new(false),
id: get_next_id(),
}
}
/// Binds the provided task to this `OwnedTasks` instance. This fails if the
/// `OwnedTasks` has been closed.
pub(crate) fn bind<T>(
&self,
task: T,
scheduler: S,
id: super::Id,
) -> (JoinHandle<T::Output>, Option<Notified<S>>)
where
S: Schedule,
T: Future + Send + 'static,
T::Output: Send + 'static,
{
let (task, notified, join) = super::new_task(task, scheduler, id);
let notified = unsafe { self.bind_inner(task, notified) };
(join, notified)
}
/// The part of `bind` that's the same for every type of future.
unsafe fn bind_inner(&self, task: Task<S>, notified: Notified<S>) -> Option<Notified<S>>
where
S: Schedule,
{
unsafe {
// safety: We just created the task, so we have exclusive access
// to the field.
task.header().set_owner_id(self.id);
}
let shard = self.list.lock_shard(&task);
        // Check the closed flag while holding the shard lock. This guarantees
        // that all tasks shut down after the `OwnedTasks` has been closed:
        // either we observe the flag here and shut the task down ourselves, or
        // the push completes before `close_and_shutdown_all` drains this shard.
if self.closed.load(Ordering::Acquire) {
drop(shard);
task.shutdown();
return None;
}
shard.push(task);
Some(notified)
}
    /// Asserts that the given task is owned by this `OwnedTasks` and converts it
/// a `LocalNotified`, giving the thread permission to poll this task.
#[inline]
pub(crate) fn assert_owner(&self, task: Notified<S>) -> LocalNotified<S> {
debug_assert_eq!(task.header().get_owner_id(), Some(self.id));
// safety: All tasks bound to this OwnedTasks are Send, so it is safe
// to poll it on this thread no matter what thread we are on.
LocalNotified {
task: task.0,
_not_send: PhantomData,
}
}
/// Shuts down all tasks in the collection. This call also closes the
/// collection, preventing new items from being added.
///
    /// The parameter `start` determines which shard this method will start at.
    /// Using different values for each worker thread reduces contention.
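    ///
    /// For example, worker thread `i` may pass `start = i` so that each worker
    /// begins draining a different shard.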
pub(crate) fn close_and_shutdown_all(&self, start: usize)
where
S: Schedule,
{
self.closed.store(true, Ordering::Release);
for i in start..self.get_shard_size() + start {
loop {
let task = self.list.pop_back(i);
match task {
Some(task) => {
task.shutdown();
}
None => break,
}
}
}
}
#[inline]
pub(crate) fn get_shard_size(&self) -> usize {
self.list.shard_size()
}
pub(crate) fn num_alive_tasks(&self) -> usize {
self.list.len()
}
cfg_64bit_metrics! {
pub(crate) fn spawned_tasks_count(&self) -> u64 {
self.list.added()
}
}
pub(crate) fn remove(&self, task: &Task<S>) -> Option<Task<S>> {
// If the task's owner ID is `None` then it is not part of any list and
// doesn't need removing.
let task_id = task.header().get_owner_id()?;
assert_eq!(task_id, self.id);
// safety: We just checked that the provided task is not in some other
// linked list.
unsafe { self.list.remove(task.header_ptr()) }
}
pub(crate) fn is_empty(&self) -> bool {
self.list.is_empty()
}
    /// Generates the size of the sharded list based on the number of worker threads.
    ///
    /// Sharding the lock effectively alleviates the lock contention caused by
    /// high concurrency.
    ///
    /// However, as the number of shards increases, the memory locality between
    /// nodes in the intrusive linked list diminishes, and constructing the
    /// sharded list takes longer.
    ///
    /// For these reasons, we cap the size of the sharded list at
    /// `MAX_SHARED_LIST_SIZE`.
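    ///
    /// For example, with 6 worker threads the list gets
    /// `min(1 << 16, 6usize.next_power_of_two() * 4) == 32` shards.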
fn gen_shared_list_size(num_cores: usize) -> usize {
const MAX_SHARED_LIST_SIZE: usize = 1 << 16;
usize::min(MAX_SHARED_LIST_SIZE, num_cores.next_power_of_two() * 4)
}
}
cfg_taskdump! {
impl<S: 'static> OwnedTasks<S> {
/// Locks the tasks, and calls `f` on an iterator over them.
pub(crate) fn for_each<F>(&self, f: F)
where
F: FnMut(&Task<S>),
{
self.list.for_each(f);
}
}
}
impl<S: 'static> LocalOwnedTasks<S> {
pub(crate) fn new() -> Self {
Self {
inner: UnsafeCell::new(OwnedTasksInner {
list: LinkedList::new(),
closed: false,
}),
id: get_next_id(),
_not_send_or_sync: PhantomData,
}
}
pub(crate) fn bind<T>(
&self,
task: T,
scheduler: S,
id: super::Id,
) -> (JoinHandle<T::Output>, Option<Notified<S>>)
where
S: Schedule,
T: Future + 'static,
T::Output: 'static,
{
let (task, notified, join) = super::new_task(task, scheduler, id);
unsafe {
// safety: We just created the task, so we have exclusive access
// to the field.
task.header().set_owner_id(self.id);
}
if self.is_closed() {
drop(notified);
task.shutdown();
(join, None)
} else {
self.with_inner(|inner| {
inner.list.push_front(task);
});
(join, Some(notified))
}
}
/// Shuts down all tasks in the collection. This call also closes the
/// collection, preventing new items from being added.
pub(crate) fn close_and_shutdown_all(&self)
where
S: Schedule,
{
self.with_inner(|inner| inner.closed = true);
while let Some(task) = self.with_inner(|inner| inner.list.pop_back()) {
task.shutdown();
}
}
pub(crate) fn remove(&self, task: &Task<S>) -> Option<Task<S>> {
// If the task's owner ID is `None` then it is not part of any list and
// doesn't need removing.
let task_id = task.header().get_owner_id()?;
assert_eq!(task_id, self.id);
self.with_inner(|inner|
// safety: We just checked that the provided task is not in some
// other linked list.
unsafe { inner.list.remove(task.header_ptr()) })
}
    /// Asserts that the given task is owned by this `LocalOwnedTasks` and
    /// converts it to a `LocalNotified`, giving the thread permission to poll
    /// this task.
#[inline]
pub(crate) fn assert_owner(&self, task: Notified<S>) -> LocalNotified<S> {
assert_eq!(task.header().get_owner_id(), Some(self.id));
// safety: The task was bound to this LocalOwnedTasks, and the
// LocalOwnedTasks is not Send or Sync, so we are on the right thread
// for polling this task.
LocalNotified {
task: task.0,
_not_send: PhantomData,
}
}
#[inline]
fn with_inner<F, T>(&self, f: F) -> T
where
F: FnOnce(&mut OwnedTasksInner<S>) -> T,
{
// safety: This type is not Sync, so concurrent calls of this method
// can't happen. Furthermore, all uses of this method in this file make
// sure that they don't call `with_inner` recursively.
self.inner.with_mut(|ptr| unsafe { f(&mut *ptr) })
}
pub(crate) fn is_closed(&self) -> bool {
self.with_inner(|inner| inner.closed)
}
pub(crate) fn is_empty(&self) -> bool {
self.with_inner(|inner| inner.list.is_empty())
}
}
#[cfg(test)]
mod tests {
use super::*;
// This test may run in parallel with other tests, so we only test that ids
// come in increasing order.
#[test]
fn test_id_not_broken() {
let mut last_id = get_next_id();
for _ in 0..1000 {
let next_id = get_next_id();
assert!(last_id < next_id);
last_id = next_id;
}
}
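
    // A quick sanity check of the shard-size heuristic above. This is a
    // sketch that assumes `OwnedTasks::<()>` is well-formed here, so that the
    // private associated function can be called from this child module.
    #[test]
    fn test_shared_list_size() {
        // next_power_of_two(1) * 4 == 4
        assert_eq!(OwnedTasks::<()>::gen_shared_list_size(1), 4);
        // next_power_of_two(6) * 4 == 32
        assert_eq!(OwnedTasks::<()>::gen_shared_list_size(6), 32);
        // Very large worker counts are capped at `MAX_SHARED_LIST_SIZE`.
        assert_eq!(OwnedTasks::<()>::gen_shared_list_size(1 << 20), 1 << 16);
    }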
}