|
19 | 19 | #[allow(dead_code, unreachable_pub)] |
20 | 20 | mod internal; |
21 | 21 | pub mod ordering; |
| 22 | +mod predefine; |
22 | 23 |
|
23 | 24 | pub use internal::AtomicImpl; |
24 | 25 | pub use ordering::{Acquire, Full, Relaxed, Release}; |
| 26 | + |
| 27 | +use crate::build_error; |
| 28 | +use internal::{AtomicBasicOps, AtomicRepr}; |
| 29 | +use ordering::OrderingType; |
| 30 | + |
/// A memory location which can be safely modified from multiple execution contexts.
///
/// This has the same size, alignment and bit validity as the underlying type `T`. And it disables
/// niche optimization for the same reason as [`UnsafeCell`].
///
/// The atomic operations are implemented in a way that is fully compatible with the [Linux Kernel
/// Memory (Consistency) Model][LKMM], hence they should be modeled as the corresponding
/// [`LKMM`][LKMM] atomic primitives. With the help of [`Atomic::from_ptr()`] and
/// [`Atomic::as_ptr()`], this provides a way to interact with [C-side atomic operations]
/// (including those without the `atomic` prefix, e.g. `READ_ONCE()`, `WRITE_ONCE()`,
/// `smp_load_acquire()` and `smp_store_release()`).
///
/// # Invariants
///
/// `self.0` is a valid `T`.
///
/// [`UnsafeCell`]: core::cell::UnsafeCell
/// [LKMM]: srctree/tools/memory-model/
/// [C-side atomic operations]: srctree/Documentation/atomic_t.txt
#[repr(transparent)]
pub struct Atomic<T: AtomicType>(AtomicRepr<T::Repr>);

// SAFETY: `Atomic<T>` is safe to share among execution contexts because all accesses are atomic.
unsafe impl<T: AtomicType> Sync for Atomic<T> {}
| 55 | + |
/// Types that support basic atomic operations.
///
/// # Round-trip transmutability
///
/// `T` is round-trip transmutable to `U` if and only if both of these properties hold:
///
/// - Any valid bit pattern for `T` is also a valid bit pattern for `U`.
/// - Transmuting (e.g. using [`transmute()`]) a value of type `T` to `U` and then to `T` again
///   yields a value that is in all aspects equivalent to the original value.
///
/// # Safety
///
/// - [`Self`] must have the same size and alignment as [`Self::Repr`].
/// - [`Self`] must be [round-trip transmutable] to [`Self::Repr`].
///
/// Note that this is more relaxed than requiring the bi-directional transmutability (i.e.
/// [`transmute()`] is always sound between `U` and `T`) because of the support for atomic
/// variables over unit-only enums, see [Examples].
///
/// # Limitations
///
/// Because C primitives are used to implement the atomic operations, and a C function requires a
/// valid object of a type to operate on (i.e. no `MaybeUninit<_>`), hence at the Rust <-> C
/// surface, only types with all the bits initialized can be passed. As a result, types like `(u8,
/// u16)` (padding bytes are uninitialized) are currently not supported.
///
/// # Examples
///
/// A unit-only enum that implements [`AtomicType`]:
///
/// ```
/// use kernel::sync::atomic::{AtomicType, Atomic, Relaxed};
///
/// #[derive(Clone, Copy, PartialEq, Eq)]
/// #[repr(i32)]
/// enum State {
///     Uninit = 0,
///     Working = 1,
///     Done = 2,
/// }
///
/// // SAFETY: `State` and `i32` have the same size and alignment, and `State` is round-trip
/// // transmutable to `i32`.
/// unsafe impl AtomicType for State {
///     type Repr = i32;
/// }
///
/// let s = Atomic::new(State::Uninit);
///
/// assert_eq!(State::Uninit, s.load(Relaxed));
/// ```
///
/// [`transmute()`]: core::mem::transmute
/// [round-trip transmutable]: AtomicType#round-trip-transmutability
/// [Examples]: AtomicType#examples
pub unsafe trait AtomicType: Sized + Send + Copy {
    /// The backing atomic implementation type.
    type Repr: AtomicImpl;
}
| 114 | + |
| 115 | +#[inline(always)] |
| 116 | +const fn into_repr<T: AtomicType>(v: T) -> T::Repr { |
| 117 | + // SAFETY: Per the safety requirement of `AtomicType`, `T` is round-trip transmutable to |
| 118 | + // `T::Repr`, therefore the transmute operation is sound. |
| 119 | + unsafe { core::mem::transmute_copy(&v) } |
| 120 | +} |
| 121 | + |
| 122 | +/// # Safety |
| 123 | +/// |
| 124 | +/// `r` must be a valid bit pattern of `T`. |
| 125 | +#[inline(always)] |
| 126 | +const unsafe fn from_repr<T: AtomicType>(r: T::Repr) -> T { |
| 127 | + // SAFETY: Per the safety requirement of the function, the transmute operation is sound. |
| 128 | + unsafe { core::mem::transmute_copy(&r) } |
| 129 | +} |
| 130 | + |
impl<T: AtomicType> Atomic<T> {
    /// Creates a new atomic `T`.
    pub const fn new(v: T) -> Self {
        // INVARIANT: Per the safety requirement of `AtomicType`, `into_repr(v)` is a valid `T`.
        Self(AtomicRepr::new(into_repr(v)))
    }

    /// Creates a reference to an atomic `T` from a pointer of `T`.
    ///
    /// This usually is used when communicating with C side or manipulating a C struct, see
    /// examples below.
    ///
    /// # Safety
    ///
    /// - `ptr` is aligned to `align_of::<T>()`.
    /// - `ptr` is valid for reads and writes for `'a`.
    /// - For the duration of `'a`, other accesses to `*ptr` must not cause data races (defined
    ///   by [`LKMM`]) against atomic operations on the returned reference. Note that if all other
    ///   accesses are atomic, then this safety requirement is trivially fulfilled.
    ///
    /// [`LKMM`]: srctree/tools/memory-model
    ///
    /// # Examples
    ///
    /// Using [`Atomic::from_ptr()`] combined with [`Atomic::load()`] or [`Atomic::store()`] can
    /// achieve the same functionality as `READ_ONCE()`/`smp_load_acquire()` or
    /// `WRITE_ONCE()`/`smp_store_release()` in C side:
    ///
    /// ```
    /// # use kernel::types::Opaque;
    /// use kernel::sync::atomic::{Atomic, Relaxed, Release};
    ///
    /// // Assume there is a C struct `foo`.
    /// mod cbindings {
    ///     #[repr(C)]
    ///     pub(crate) struct foo {
    ///         pub(crate) a: i32,
    ///         pub(crate) b: i32
    ///     }
    /// }
    ///
    /// let tmp = Opaque::new(cbindings::foo { a: 1, b: 2 });
    ///
    /// // struct foo *foo_ptr = ..;
    /// let foo_ptr = tmp.get();
    ///
    /// // SAFETY: `foo_ptr` is valid, and `.a` is in bounds.
    /// let foo_a_ptr = unsafe { &raw mut (*foo_ptr).a };
    ///
    /// // a = READ_ONCE(foo_ptr->a);
    /// //
    /// // SAFETY: `foo_a_ptr` is valid for read, and all other accesses on it are atomic, so no
    /// // data race.
    /// let a = unsafe { Atomic::from_ptr(foo_a_ptr) }.load(Relaxed);
    /// # assert_eq!(a, 1);
    ///
    /// // smp_store_release(&foo_ptr->a, 2);
    /// //
    /// // SAFETY: `foo_a_ptr` is valid for writes, and all other accesses on it are atomic, so
    /// // no data race.
    /// unsafe { Atomic::from_ptr(foo_a_ptr) }.store(2, Release);
    /// ```
    pub unsafe fn from_ptr<'a>(ptr: *mut T) -> &'a Self
    where
        T: Sync,
    {
        // CAST: `T` and `Atomic<T>` have the same size, alignment and bit validity.
        // SAFETY: Per function safety requirement, `ptr` is a valid pointer and the object will
        // live long enough. It's safe to return a `&Atomic<T>` because function safety requirement
        // guarantees other accesses won't cause data races.
        unsafe { &*ptr.cast::<Self>() }
    }

    /// Returns a pointer to the underlying atomic `T`.
    ///
    /// Note that use of the return pointer must not cause data races defined by [`LKMM`].
    ///
    /// # Guarantees
    ///
    /// The returned pointer is valid and properly aligned (i.e. aligned to [`align_of::<T>()`]).
    ///
    /// [`LKMM`]: srctree/tools/memory-model
    /// [`align_of::<T>()`]: core::mem::align_of
    pub const fn as_ptr(&self) -> *mut T {
        // GUARANTEE: Per the function guarantee of `AtomicRepr::as_ptr()`, the `self.0.as_ptr()`
        // must be a valid and properly aligned pointer for `T::Repr`, and per the safety guarantee
        // of `AtomicType`, it's a valid and properly aligned pointer of `T`.
        self.0.as_ptr().cast()
    }

    /// Returns a mutable reference to the underlying atomic `T`.
    ///
    /// This is safe because the mutable reference of the atomic `T` guarantees exclusive access.
    pub fn get_mut(&mut self) -> &mut T {
        // CAST: `T` and `T::Repr` have the same size and alignment per the safety requirement of
        // `AtomicType`, and per the type invariants `self.0` is a valid `T`, therefore the casting
        // result is a valid pointer of `T`.
        // SAFETY: The pointer is valid per the CAST comment above, and the mutable reference
        // guarantees exclusive access.
        unsafe { &mut *self.0.as_ptr().cast() }
    }
}
| 233 | + |
impl<T: AtomicType> Atomic<T>
where
    T::Repr: AtomicBasicOps,
{
    /// Loads the value from the atomic `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// let x = Atomic::new(42i64);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_read", "atomic64_read"))]
    #[inline(always)]
    pub fn load<Ordering: ordering::AcquireOrRelaxed>(&self, _: Ordering) -> T {
        let v = {
            // The `AcquireOrRelaxed` bound restricts `Ordering::TYPE` to the two arms below;
            // the wildcard arm turns any future violation into a compile-time error via
            // `build_error!` rather than a silent wrong ordering.
            match Ordering::TYPE {
                OrderingType::Relaxed => T::Repr::atomic_read(&self.0),
                OrderingType::Acquire => T::Repr::atomic_read_acquire(&self.0),
                _ => build_error!("Wrong ordering"),
            }
        };

        // SAFETY: `v` comes from reading `self.0`, which is a valid `T` per the type invariants.
        unsafe { from_repr(v) }
    }

    /// Stores a value to the atomic `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use kernel::sync::atomic::{Atomic, Relaxed};
    ///
    /// let x = Atomic::new(42i32);
    ///
    /// assert_eq!(42, x.load(Relaxed));
    ///
    /// x.store(43, Relaxed);
    ///
    /// assert_eq!(43, x.load(Relaxed));
    /// ```
    #[doc(alias("atomic_set", "atomic64_set"))]
    #[inline(always)]
    pub fn store<Ordering: ordering::ReleaseOrRelaxed>(&self, v: T, _: Ordering) {
        let v = into_repr(v);

        // INVARIANT: `v` is a valid `T`, and is stored to `self.0` by `atomic_set*()`.
        // The `ReleaseOrRelaxed` bound restricts `Ordering::TYPE` to the two arms below; the
        // wildcard arm turns any future violation into a compile-time error via `build_error!`.
        match Ordering::TYPE {
            OrderingType::Relaxed => T::Repr::atomic_set(&self.0, v),
            OrderingType::Release => T::Repr::atomic_set_release(&self.0, v),
            _ => build_error!("Wrong ordering"),
        }
    }
}
0 commit comments