use core::cmp;
use core::hint;
use core::num::Wrapping;
use core::ops;
use core::ptr;
use core::sync::atomic::{AtomicUsize, Ordering};
use bytemuck::NoUninit;
/// A minimal test-and-test-and-set spinlock: 0 = unlocked, 1 = held.
///
/// Aligned to 64 bytes, presumably so each lock occupies its own cache line
/// and locks in the pool don't false-share.
#[repr(align(64))]
struct SpinLock(AtomicUsize);

impl SpinLock {
    /// Busy-waits until the lock is acquired.
    fn lock(&self) {
        loop {
            // Attempt the 0 -> 1 transition; Acquire on success orders the
            // critical section after the acquisition. A weak CAS is fine
            // because we retry in a loop anyway.
            if self
                .0
                .compare_exchange_weak(0, 1, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                return;
            }
            // Contended: spin on a cheap Relaxed load (no cache-line
            // write traffic) until the lock looks free, then retry the CAS.
            while self.0.load(Ordering::Relaxed) != 0 {
                hint::spin_loop();
            }
        }
    }

    /// Releases the lock; Release ordering publishes the critical section's
    /// writes to the next acquirer.
    fn unlock(&self) {
        self.0.store(0, Ordering::Release);
    }
}
// `array![expr; N]` expands to an array literal `[expr, expr, ...]` with N
// textual copies of `expr`, for N in {1, 2, 4, 8, 16, 32, 64}. Because the
// expression is duplicated (not copied at runtime), this works for
// non-`Copy` initializers such as `SpinLock(AtomicUsize::new(0))`.
//
// It works by binary decomposition: the `@accum (n, ...)` rules for
// n >= 4 halve n while doubling the expression list; the n == 1 and n == 2
// rules move the accumulated expressions into the output body; n == 0
// finishes by emitting the collected tokens as one `[...]` expression.
macro_rules! array {
    // Done: emit the accumulated elements as an array expression.
    (@accum (0, $($_es:expr),*) -> ($($body:tt)*))
        => {array!(@as_expr [$($body)*])};
    // Append one copy of each pending expression, then finish.
    (@accum (1, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)*))};
    // Append two copies of each pending expression, then finish.
    (@accum (2, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (0, $($es),*) -> ($($body)* $($es,)* $($es,)*))};
    // For n in {4, 8, 16, 32, 64}: halve the count and double the list.
    (@accum (4, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (2, $($es,)* $($es),*) -> ($($body)*))};
    (@accum (8, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (4, $($es,)* $($es),*) -> ($($body)*))};
    (@accum (16, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (8, $($es,)* $($es),*) -> ($($body)*))};
    (@accum (32, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (16, $($es,)* $($es),*) -> ($($body)*))};
    (@accum (64, $($es:expr),*) -> ($($body:tt)*))
        => {array!(@accum (32, $($es,)* $($es),*) -> ($($body)*))};
    // Identity helper: forces the token soup to parse as a single expression.
    (@as_expr $e:expr) => {$e};
    // Public entry point: `array![expr; N]`.
    [$e:expr; $n:tt] => { array!(@accum ($n, $e) -> ()) };
}
// Global pool of 64 spinlocks shared by all emulated atomic operations.
// An address is mapped onto one of these by `lock_for_addr`; 64 is a power
// of two so the index can be taken with a bitmask.
static SPINLOCKS: [SpinLock; 64] = array![SpinLock(AtomicUsize::new(0)); 64];
/// Hashes `addr` onto one of the spinlocks in the global pool.
///
/// The low 4 bits are discarded first, so atomics at nearby addresses
/// (within the same 16-byte region) share a lock. Bits from 16 positions
/// higher are XOR-mixed in so addresses that differ only in high bits do
/// not all collide on the same slot.
#[inline]
fn lock_for_addr(addr: usize) -> &'static SpinLock {
    let mask = SPINLOCKS.len() - 1; // pool size is a power of two
    let shifted = addr >> 4;
    let index = ((shifted >> 16) ^ (shifted & mask)) & mask;
    &SPINLOCKS[index]
}
/// Acquires the spinlock guarding `addr` and returns a guard that releases
/// it when dropped, scoping the critical section to the guard's lifetime.
#[inline]
fn lock(addr: usize) -> LockGuard {
    let spinlock = lock_for_addr(addr);
    spinlock.lock();
    LockGuard(spinlock)
}
/// Guard returned by `lock`; releases the held spinlock on drop so the
/// critical section can never leak the lock, even on early return.
struct LockGuard(&'static SpinLock);
impl Drop for LockGuard {
    #[inline]
    fn drop(&mut self) {
        self.0.unlock();
    }
}
/// Emulated atomic load: reads `*dst` while holding the spinlock hashed
/// from `dst`'s address.
///
/// Atomicity only holds against other accesses that also go through these
/// lock-based functions.
///
/// # Safety
/// `dst` must be valid for reads, properly aligned, and point to an
/// initialized `T`.
#[inline]
pub unsafe fn atomic_load<T>(dst: *mut T) -> T {
    // Guard must be named (not `_`) so it lives until the end of the scope.
    let _guard = lock(dst as usize);
    ptr::read(dst)
}
/// Emulated atomic store: writes `val` to `*dst` while holding the
/// spinlock hashed from `dst`'s address. The old value is overwritten
/// without being dropped (`ptr::write`).
///
/// # Safety
/// `dst` must be valid for writes and properly aligned.
#[inline]
pub unsafe fn atomic_store<T>(dst: *mut T, val: T) {
    let _guard = lock(dst as usize);
    ptr::write(dst, val);
}
/// Emulated atomic swap: stores `val` into `*dst` and returns the previous
/// value, all under the address-hashed spinlock.
///
/// # Safety
/// `dst` must be valid for reads and writes, properly aligned, and point
/// to an initialized `T`.
#[inline]
pub unsafe fn atomic_swap<T>(dst: *mut T, val: T) -> T {
    let _guard = lock(dst as usize);
    // Equivalent to `ptr::replace(dst, val)`: read out the old value,
    // then overwrite it in place.
    let previous = ptr::read(dst);
    ptr::write(dst, val);
    previous
}
/// Emulated atomic compare-exchange: if the value at `dst` is byte-wise
/// equal to `current`, stores `new` and returns `Ok(previous)`; otherwise
/// leaves `*dst` untouched and returns `Err(previous)`.
///
/// Comparison is done on the raw byte representation (the `NoUninit` bound
/// guarantees `T` has no uninitialized/padding bytes), since `T` is not
/// required to implement `PartialEq`.
///
/// # Safety
/// `dst` must be valid for reads and writes, properly aligned, and point
/// to an initialized `T`.
#[inline]
pub unsafe fn atomic_compare_exchange<T: NoUninit>(
    dst: *mut T,
    current: T,
    new: T,
) -> Result<T, T> {
    let _l = lock(dst as usize);
    let result = ptr::read(dst);
    // Fix: this previously read `bytes_of(¤t)` — a mojibake-corrupted
    // `bytes_of(&current)` (the `&curren` prefix had been mangled into the
    // `¤` character), which does not compile.
    let a = bytemuck::bytes_of(&result);
    let b = bytemuck::bytes_of(&current);
    if a == b {
        ptr::write(dst, new);
        Ok(result)
    } else {
        Err(result)
    }
}
/// Emulated atomic fetch-add with wrapping overflow semantics: adds `val`
/// to `*dst` under the address-hashed spinlock and returns the old value.
///
/// # Safety
/// `dst` must be valid for reads and writes, properly aligned, and point
/// to an initialized `T`.
#[inline]
pub unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T) -> T
where
    Wrapping<T>: ops::Add<Output = Wrapping<T>>,
{
    let _guard = lock(dst as usize);
    let old = ptr::read(dst);
    // `Wrapping` makes overflow well-defined (two's-complement wraparound)
    // instead of a debug-mode panic.
    let updated = Wrapping(old) + Wrapping(val);
    ptr::write(dst, updated.0);
    old
}
/// Emulated atomic fetch-sub with wrapping overflow semantics: subtracts
/// `val` from `*dst` under the address-hashed spinlock and returns the old
/// value.
///
/// # Safety
/// `dst` must be valid for reads and writes, properly aligned, and point
/// to an initialized `T`.
#[inline]
pub unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T) -> T
where
    Wrapping<T>: ops::Sub<Output = Wrapping<T>>,
{
    let _guard = lock(dst as usize);
    let old = ptr::read(dst);
    // `Wrapping` makes underflow well-defined instead of a debug-mode panic.
    let updated = Wrapping(old) - Wrapping(val);
    ptr::write(dst, updated.0);
    old
}
/// Emulated atomic fetch-AND: replaces `*dst` with `*dst & val` under the
/// address-hashed spinlock and returns the old value.
///
/// # Safety
/// `dst` must be valid for reads and writes, properly aligned, and point
/// to an initialized `T`.
#[inline]
pub unsafe fn atomic_and<T: Copy + ops::BitAnd<Output = T>>(dst: *mut T, val: T) -> T {
    let _guard = lock(dst as usize);
    let old = ptr::read(dst);
    ptr::write(dst, old & val);
    old
}
/// Emulated atomic fetch-OR: replaces `*dst` with `*dst | val` under the
/// address-hashed spinlock and returns the old value.
///
/// # Safety
/// `dst` must be valid for reads and writes, properly aligned, and point
/// to an initialized `T`.
#[inline]
pub unsafe fn atomic_or<T: Copy + ops::BitOr<Output = T>>(dst: *mut T, val: T) -> T {
    let _guard = lock(dst as usize);
    let old = ptr::read(dst);
    ptr::write(dst, old | val);
    old
}
/// Emulated atomic fetch-XOR: replaces `*dst` with `*dst ^ val` under the
/// address-hashed spinlock and returns the old value.
///
/// # Safety
/// `dst` must be valid for reads and writes, properly aligned, and point
/// to an initialized `T`.
#[inline]
pub unsafe fn atomic_xor<T: Copy + ops::BitXor<Output = T>>(dst: *mut T, val: T) -> T {
    let _guard = lock(dst as usize);
    let old = ptr::read(dst);
    ptr::write(dst, old ^ val);
    old
}
/// Emulated atomic fetch-min: replaces `*dst` with `min(*dst, val)` under
/// the address-hashed spinlock and returns the old value.
///
/// # Safety
/// `dst` must be valid for reads and writes, properly aligned, and point
/// to an initialized `T`.
#[inline]
pub unsafe fn atomic_min<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {
    let _guard = lock(dst as usize);
    let old = ptr::read(dst);
    // `Ord::min` is the method form of `cmp::min`: on a tie it keeps `old`.
    ptr::write(dst, old.min(val));
    old
}
/// Emulated atomic fetch-max: replaces `*dst` with `max(*dst, val)` under
/// the address-hashed spinlock and returns the old value.
///
/// # Safety
/// `dst` must be valid for reads and writes, properly aligned, and point
/// to an initialized `T`.
#[inline]
pub unsafe fn atomic_max<T: Copy + cmp::Ord>(dst: *mut T, val: T) -> T {
    let _guard = lock(dst as usize);
    let old = ptr::read(dst);
    // `Ord::max` is the method form of `cmp::max`: on a tie it keeps `val`,
    // but since the values compare equal the stored result is identical.
    ptr::write(dst, old.max(val));
    old
}