// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::{deadlock, util};
use core::{
sync::atomic::{AtomicU8, Ordering},
time::Duration,
};
use instant::Instant;
use lock_api::RawMutex as RawMutex_;
use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};

// UnparkToken used to indicate that the target thread should attempt to
// lock the mutex again as soon as it is unparked.
pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0);

// UnparkToken used to indicate that the mutex is being handed off to the target
// thread directly without unlocking it.
pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);

/// This bit is set in the `state` of a `RawMutex` when that mutex is locked by some thread.
const LOCKED_BIT: u8 = 0b01;
/// This bit is set in the `state` of a `RawMutex` just before parking a thread. A thread is
/// parked when it wants to lock the mutex but the mutex is currently held by some other thread.
const PARKED_BIT: u8 = 0b10;

/// Raw mutex type backed by the parking lot.
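///
/// # Example
///
/// A minimal sketch of driving the raw mutex directly through the
/// `lock_api::RawMutex` trait; most code would use the higher-level
/// `Mutex` wrapper instead. (Illustrative only; it assumes the crate's
/// public re-export of `RawMutex`.)
///
/// ```
/// use lock_api::RawMutex as _;
///
/// let raw = parking_lot::RawMutex::INIT;
/// raw.lock();
/// assert!(raw.is_locked());
/// assert!(!raw.try_lock()); // not reentrant: a second attempt fails
/// unsafe { raw.unlock() };
/// ```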
pub struct RawMutex {
/// This atomic integer holds the current state of the mutex instance. Only the two lowest bits
/// are used. See `LOCKED_BIT` and `PARKED_BIT` for the bitmask for these bits.
///
/// # State table:
///
    /// PARKED_BIT | LOCKED_BIT | Description
    ///     0      |     0      | The mutex is not locked, nor is anyone waiting for it.
    /// -----------+------------+------------------------------------------------------------------
    ///     0      |     1      | The mutex is locked by exactly one thread. No other thread is
    ///            |            | waiting for it.
    /// -----------+------------+------------------------------------------------------------------
    ///     1      |     0      | The mutex is not locked. One or more threads are parked or about
    ///            |            | to park. At least one of the parked threads is just about to be
    ///            |            | unparked, or a thread heading for parking might abort the park.
    /// -----------+------------+------------------------------------------------------------------
    ///     1      |     1      | The mutex is locked by exactly one thread. One or more threads
    ///            |            | are parked or about to park, waiting for the lock to become
    ///            |            | available. In this state, PARKED_BIT is only ever cleared when a
    ///            |            | bucket lock is held (i.e. in a parking_lot_core callback). This
    ///            |            | ensures that we never end up in a situation where there are
    ///            |            | parked threads but PARKED_BIT is not set (which would result in
    ///            |            | those threads potentially never getting woken up).
state: AtomicU8,
}

unsafe impl lock_api::RawMutex for RawMutex {
const INIT: RawMutex = RawMutex {
state: AtomicU8::new(0),
};
type GuardMarker = crate::GuardMarker;
#[inline]
fn lock(&self) {
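        // Fast path: a single weak CAS from the fully unlocked state. A
        // spurious failure just sends us down the slow path, which retries.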
if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
self.lock_slow(None);
}
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
#[inline]
fn try_lock(&self) -> bool {
let mut state = self.state.load(Ordering::Relaxed);
loop {
if state & LOCKED_BIT != 0 {
return false;
}
match self.state.compare_exchange_weak(
state,
state | LOCKED_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
return true;
}
Err(x) => state = x,
}
}
}
#[inline]
unsafe fn unlock(&self) {
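        // Fast path: release an uncontended lock. The CAS fails whenever
        // PARKED_BIT is set, in which case the slow path wakes a waiter.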
deadlock::release_resource(self as *const _ as usize);
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
self.unlock_slow(false);
}
#[inline]
fn is_locked(&self) -> bool {
let state = self.state.load(Ordering::Relaxed);
state & LOCKED_BIT != 0
}
}

unsafe impl lock_api::RawMutexFair for RawMutex {
#[inline]
unsafe fn unlock_fair(&self) {
deadlock::release_resource(self as *const _ as usize);
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
self.unlock_slow(true);
}
#[inline]
unsafe fn bump(&self) {
if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
self.bump_slow();
}
}
}
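
// Illustrative note (a sketch, not part of the original source): the fair
// unlock above is normally reached through the `lock_api` wrappers, e.g.
//
//     let m = parking_lot::Mutex::new(0);
//     let guard = m.lock();
//     parking_lot::MutexGuard::unlock_fair(guard); // calls unlock_fair()
//
// Likewise, `bump` backs `MutexGuard::bump`, which hands the lock to a
// parked thread (if any) and immediately reacquires it, bounding how long
// waiters can starve behind a long critical section.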

unsafe impl lock_api::RawMutexTimed for RawMutex {
type Duration = Duration;
type Instant = Instant;
#[inline]
fn try_lock_until(&self, timeout: Instant) -> bool {
let result = if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
true
} else {
self.lock_slow(Some(timeout))
};
if result {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
result
}
#[inline]
fn try_lock_for(&self, timeout: Duration) -> bool {
let result = if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
true
} else {
self.lock_slow(util::to_deadline(timeout))
};
if result {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
result
}
}
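
// Illustrative note (a sketch, not part of the original source): the timed
// methods above back `parking_lot::Mutex::try_lock_for` and `try_lock_until`,
// e.g.
//
//     let m = parking_lot::Mutex::new(0);
//     if let Some(guard) = m.try_lock_for(core::time::Duration::from_millis(10)) {
//         // acquired within the timeout
//     }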

impl RawMutex {
    // Used by Condvar when requeuing threads to us. Must be called while
    // holding the queue lock.
#[inline]
pub(crate) fn mark_parked_if_locked(&self) -> bool {
let mut state = self.state.load(Ordering::Relaxed);
loop {
if state & LOCKED_BIT == 0 {
return false;
}
match self.state.compare_exchange_weak(
state,
state | PARKED_BIT,
Ordering::Relaxed,
Ordering::Relaxed,
) {
Ok(_) => return true,
Err(x) => state = x,
}
}
}
    // Used by Condvar when requeuing threads to us. Must be called while
    // holding the queue lock.
#[inline]
pub(crate) fn mark_parked(&self) {
self.state.fetch_or(PARKED_BIT, Ordering::Relaxed);
}
#[cold]
fn lock_slow(&self, timeout: Option<Instant>) -> bool {
let mut spinwait = SpinWait::new();
let mut state = self.state.load(Ordering::Relaxed);
loop {
// Grab the lock if it isn't locked, even if there is a queue on it
if state & LOCKED_BIT == 0 {
match self.state.compare_exchange_weak(
state,
state | LOCKED_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => return true,
Err(x) => state = x,
}
continue;
}
// If there is no queue, try spinning a few times
if state & PARKED_BIT == 0 && spinwait.spin() {
state = self.state.load(Ordering::Relaxed);
continue;
}
// Set the parked bit
if state & PARKED_BIT == 0 {
if let Err(x) = self.state.compare_exchange_weak(
state,
state | PARKED_BIT,
Ordering::Relaxed,
Ordering::Relaxed,
) {
state = x;
continue;
}
}
// Park our thread until we are woken up by an unlock
let addr = self as *const _ as usize;
let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
let before_sleep = || {};
let timed_out = |_, was_last_thread| {
// Clear the parked bit if we were the last parked thread
if was_last_thread {
self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
}
};
// SAFETY:
// * `addr` is an address we control.
// * `validate`/`timed_out` does not panic or call into any function of `parking_lot`.
// * `before_sleep` does not call `park`, nor does it panic.
match unsafe {
parking_lot_core::park(
addr,
validate,
before_sleep,
timed_out,
DEFAULT_PARK_TOKEN,
timeout,
)
} {
// The thread that unparked us passed the lock on to us
// directly without unlocking it.
ParkResult::Unparked(TOKEN_HANDOFF) => return true,
// We were unparked normally, try acquiring the lock again
ParkResult::Unparked(_) => (),
// The validation function failed, try locking again
ParkResult::Invalid => (),
// Timeout expired
ParkResult::TimedOut => return false,
}
// Loop back and try locking again
spinwait.reset();
state = self.state.load(Ordering::Relaxed);
}
}
#[cold]
fn unlock_slow(&self, force_fair: bool) {
// Unpark one thread and leave the parked bit set if there might
// still be parked threads on this address.
let addr = self as *const _ as usize;
let callback = |result: UnparkResult| {
// If we are using a fair unlock then we should keep the
// mutex locked and hand it off to the unparked thread.
if result.unparked_threads != 0 && (force_fair || result.be_fair) {
// Clear the parked bit if there are no more parked
// threads.
if !result.have_more_threads {
self.state.store(LOCKED_BIT, Ordering::Relaxed);
}
return TOKEN_HANDOFF;
}
// Clear the locked bit, and the parked bit as well if there
// are no more parked threads.
if result.have_more_threads {
self.state.store(PARKED_BIT, Ordering::Release);
} else {
self.state.store(0, Ordering::Release);
}
TOKEN_NORMAL
};
// SAFETY:
// * `addr` is an address we control.
// * `callback` does not panic or call into any function of `parking_lot`.
unsafe {
parking_lot_core::unpark_one(addr, callback);
}
}
#[cold]
fn bump_slow(&self) {
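        // Hand the lock off to a parked thread with a fair unlock, then
        // immediately queue up to reacquire it.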
unsafe { deadlock::release_resource(self as *const _ as usize) };
self.unlock_slow(true);
self.lock();
}
}
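
// The tests below are an illustrative sanity check added for exposition (not
// part of the original source). They exercise the fast path, the parking
// slow path, and the timed API defined above.
#[cfg(test)]
mod tests {
    use super::RawMutex;
    use core::time::Duration;
    use lock_api::{RawMutex as _, RawMutexTimed as _};
    use std::sync::Arc;
    use std::thread;

    #[test]
    fn uncontended_lock_unlock() {
        let raw = RawMutex::INIT;
        raw.lock();
        assert!(raw.is_locked());
        // Not reentrant: a second attempt from the same thread fails.
        assert!(!raw.try_lock());
        unsafe { raw.unlock() };
        assert!(!raw.is_locked());
    }

    #[test]
    fn contended_lock_parks_and_wakes() {
        let raw = Arc::new(RawMutex::INIT);
        raw.lock();
        let t = {
            let raw = Arc::clone(&raw);
            // This thread takes the slow path: it parks until the main
            // thread unlocks and `unlock_slow` unparks it.
            thread::spawn(move || {
                raw.lock();
                unsafe { raw.unlock() };
            })
        };
        thread::sleep(Duration::from_millis(50));
        unsafe { raw.unlock() };
        t.join().unwrap();
    }

    #[test]
    fn timed_lock_times_out_under_contention() {
        let raw = Arc::new(RawMutex::INIT);
        raw.lock();
        let raw2 = Arc::clone(&raw);
        // The lock stays held for the whole timeout, so the timed attempt
        // should park and then report failure.
        let timed_out = thread::spawn(move || !raw2.try_lock_for(Duration::from_millis(50)))
            .join()
            .unwrap();
        assert!(timed_out);
        unsafe { raw.unlock() };
    }
}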