Skip to content

Commit 87b59fd

Browse files
hoshinolinajannau
authored and committed
rust: drm: mm: Add DRM MM Range Allocator abstraction
drm_mm provides a simple range allocator, useful for managing virtual address ranges. Add a Rust abstraction to expose this module to Rust drivers. Signed-off-by: Asahi Lina <lina@asahilina.net>
1 parent 23b71c1 commit 87b59fd

2 files changed

Lines changed: 311 additions & 0 deletions

File tree

rust/kernel/drm/mm.rs

Lines changed: 310 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,310 @@
1+
// SPDX-License-Identifier: GPL-2.0 OR MIT
2+
3+
//! DRM MM range allocator
4+
//!
5+
//! C header: [`include/drm/drm_mm.h`](../../../../include/drm/drm_mm.h)
6+
7+
use crate::{
8+
alloc::flags::*,
9+
bindings,
10+
error::{to_result, Result},
11+
sync::{new_mutex, Arc, Mutex, UniqueArc},
12+
types::Opaque,
13+
};
14+
15+
use crate::init::InPlaceInit;
16+
use crate::prelude::KBox;
17+
18+
use core::{
19+
marker::{PhantomData, PhantomPinned},
20+
ops::Deref,
21+
pin::Pin,
22+
};
23+
24+
/// Type alias representing a DRM MM node.
///
/// The node is heap-allocated and pinned: the embedded `drm_mm_node` is
/// linked into intrusive lists on the C side and must never move.
pub type Node<A, T> = Pin<KBox<NodeData<A, T>>>;
26+
27+
/// Trait which must be implemented by the inner allocator state type provided by the user.
pub trait AllocInner<T> {
    /// Notification that a node was dropped from the allocator.
    ///
    /// Called with the allocator lock held, just before the node is removed
    /// from the underlying `drm_mm`. The default implementation does nothing.
    fn drop_object(&mut self, _start: u64, _size: u64, _color: usize, _object: &mut T) {}
}
32+
33+
// Users that need no extra allocator state can simply use `()`.
impl<T> AllocInner<T> for () {}
34+
35+
/// Wrapper type for a `struct drm_mm` plus user AllocInner object.
///
/// Field 0 is the C allocator state, field 1 the user-supplied state; both
/// are protected by the same `Mutex` wherever this type is used.
///
/// # Invariants
/// The `drm_mm` struct is valid and initialized.
struct MmInner<A: AllocInner<T>, T>(Opaque<bindings::drm_mm>, A, PhantomData<T>);
40+
41+
/// Represents a single allocated node in the MM allocator
pub struct NodeData<A: AllocInner<T>, T> {
    // The C node; once inserted it is linked into the allocator's lists.
    node: bindings::drm_mm_node,
    // Keeps the shared allocator state alive while any node exists.
    mm: Arc<Mutex<MmInner<A, T>>>,
    // True once `node` has been inserted/reserved; gates removal on drop.
    valid: bool,
    /// A drm_mm_node needs to be pinned because nodes reference each other in a linked list.
    _pin: PhantomPinned,
    // User payload associated with this allocation.
    inner: T,
}
50+
51+
// `NodeData` may be moved between / shared across threads as long as the
// user payload permits it: all allocator operations go through the mutex.
// SAFETY: Allocator ops take the mutex, and there are no mutable actions on the node.
unsafe impl<A: Send + AllocInner<T>, T: Send> Send for NodeData<A, T> {}
// SAFETY: Allocator ops take the mutex, and there are no mutable actions on the node.
unsafe impl<A: Send + AllocInner<T>, T: Sync> Sync for NodeData<A, T> {}
55+
56+
/// Available MM node insertion modes
///
/// The discriminants mirror the C `enum drm_mm_insert_mode` values, which is
/// what allows passing a variant to the C API with a plain `as u32` cast.
#[repr(u32)]
pub enum InsertMode {
    /// Search for the smallest hole (within the search range) that fits the desired node.
    ///
    /// Allocates the node from the bottom of the found hole.
    Best = bindings::drm_mm_insert_mode_DRM_MM_INSERT_BEST,

    /// Search for the lowest hole (address closest to 0, within the search range) that fits the
    /// desired node.
    ///
    /// Allocates the node from the bottom of the found hole.
    Low = bindings::drm_mm_insert_mode_DRM_MM_INSERT_LOW,

    /// Search for the highest hole (address closest to U64_MAX, within the search range) that fits
    /// the desired node.
    ///
    /// Allocates the node from the top of the found hole. The specified alignment for the node is
    /// applied to the base of the node (`Node.start()`).
    High = bindings::drm_mm_insert_mode_DRM_MM_INSERT_HIGH,

    /// Search for the most recently evicted hole (within the search range) that fits the desired
    /// node. This is appropriate for use immediately after performing an eviction scan and removing
    /// the selected nodes to form a hole.
    ///
    /// Allocates the node from the bottom of the found hole.
    Evict = bindings::drm_mm_insert_mode_DRM_MM_INSERT_EVICT,
}
84+
85+
/// A clonable, interlocked reference to the allocator state.
///
/// This is useful to perform actions on the user-supplied `AllocInner<T>` type given just a Node,
/// without immediately taking the lock.
#[derive(Clone)]
pub struct InnerRef<A: AllocInner<T>, T>(Arc<Mutex<MmInner<A, T>>>);
91+
92+
impl<A: AllocInner<T>, T> InnerRef<A, T> {
    /// Runs `cb` against the user-provided `AllocInner<T>` state, holding the
    /// allocator lock for the duration of the call, and returns whatever the
    /// callback produces.
    pub fn with<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
        let mut guard = self.0.lock();
        let state = &mut guard.1;
        cb(state)
    }
}
99+
100+
impl<A: AllocInner<T>, T> NodeData<A, T> {
101+
/// Returns the color of the node (an opaque value)
102+
pub fn color(&self) -> usize {
103+
self.node.color as usize
104+
}
105+
106+
/// Returns the start address of the node
107+
pub fn start(&self) -> u64 {
108+
self.node.start
109+
}
110+
111+
/// Returns the size of the node in bytes
112+
pub fn size(&self) -> u64 {
113+
self.node.size
114+
}
115+
116+
/// Operate on the user `AllocInner<T>` implementation associated with this node's allocator.
117+
pub fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
118+
let mut l = self.mm.lock();
119+
cb(&mut l.1)
120+
}
121+
122+
/// Return a clonable, detached reference to the allocator inner data.
123+
pub fn alloc_ref(&self) -> InnerRef<A, T> {
124+
InnerRef(self.mm.clone())
125+
}
126+
127+
/// Return a mutable reference to the inner data.
128+
pub fn inner_mut(self: Pin<&mut Self>) -> &mut T {
129+
// SAFETY: This is okay because inner is not structural
130+
unsafe { &mut self.get_unchecked_mut().inner }
131+
}
132+
}
133+
134+
// A node transparently dereferences to the user payload it carries.
impl<A: AllocInner<T>, T> Deref for NodeData<A, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
141+
142+
impl<A: AllocInner<T>, T> Drop for NodeData<A, T> {
    fn drop(&mut self) {
        // Nodes that were allocated but never successfully inserted/reserved
        // (`valid == false`) have nothing registered with the C allocator.
        if self.valid {
            let mut guard = self.mm.lock();

            // Inform the user allocator that a node is being dropped.
            guard
                .1
                .drop_object(self.start(), self.size(), self.color(), &mut self.inner);
            // SAFETY: The MM lock is still taken, so we can safely remove the node.
            unsafe { bindings::drm_mm_remove_node(&mut self.node) };
        }
    }
}
156+
157+
/// An instance of a DRM MM range allocator.
pub struct Allocator<A: AllocInner<T>, T> {
    // Shared, lock-protected allocator state. Every issued node also holds a
    // reference, so the state outlives this `Allocator` handle if needed.
    mm: Arc<Mutex<MmInner<A, T>>>,
    _p: PhantomData<T>,
}
162+
163+
impl<A: AllocInner<T>, T> Allocator<A, T> {
164+
/// Create a new range allocator for the given start and size range of addresses.
165+
///
166+
/// The user may optionally provide an inner object representing allocator state, which will
167+
/// be protected by the same lock. If not required, `()` can be used.
168+
#[track_caller]
169+
pub fn new(start: u64, size: u64, inner: A) -> Result<Allocator<A, T>> {
170+
// SAFETY: We call `Mutex::init_lock` below.
171+
let mm = UniqueArc::pin_init(
172+
new_mutex!(MmInner(Opaque::uninit(), inner, PhantomData)),
173+
GFP_KERNEL,
174+
)?;
175+
176+
// SAFETY: The Opaque instance provides a valid pointer, and it is initialized after
177+
// this call.
178+
unsafe {
179+
bindings::drm_mm_init(mm.lock().0.get(), start, size);
180+
}
181+
182+
Ok(Allocator {
183+
mm: mm.into(),
184+
_p: PhantomData,
185+
})
186+
}
187+
188+
/// Insert a new node into the allocator of a given size.
189+
///
190+
/// `node` is the user `T` type data to store into the node.
191+
pub fn insert_node(&mut self, node: T, size: u64) -> Result<Node<A, T>> {
192+
self.insert_node_generic(node, size, 0, 0, InsertMode::Best)
193+
}
194+
195+
/// Insert a new node into the allocator of a given size, with configurable alignment,
196+
/// color, and insertion mode.
197+
///
198+
/// `node` is the user `T` type data to store into the node.
199+
pub fn insert_node_generic(
200+
&mut self,
201+
node: T,
202+
size: u64,
203+
alignment: u64,
204+
color: usize,
205+
mode: InsertMode,
206+
) -> Result<Node<A, T>> {
207+
self.insert_node_in_range(node, size, alignment, color, 0, u64::MAX, mode)
208+
}
209+
210+
/// Insert a new node into the allocator of a given size, with configurable alignment,
211+
/// color, insertion mode, and sub-range to allocate from.
212+
///
213+
/// `node` is the user `T` type data to store into the node.
214+
#[allow(clippy::too_many_arguments)]
215+
pub fn insert_node_in_range(
216+
&mut self,
217+
node: T,
218+
size: u64,
219+
alignment: u64,
220+
color: usize,
221+
start: u64,
222+
end: u64,
223+
mode: InsertMode,
224+
) -> Result<Node<A, T>> {
225+
let mut mm_node = KBox::new(
226+
NodeData {
227+
// SAFETY: This C struct should be zero-initialized.
228+
node: unsafe { core::mem::zeroed() },
229+
valid: false,
230+
inner: node,
231+
mm: self.mm.clone(),
232+
_pin: PhantomPinned,
233+
},
234+
GFP_KERNEL,
235+
)?;
236+
237+
let guard = self.mm.lock();
238+
// SAFETY: We hold the lock and all pointers are valid.
239+
to_result(unsafe {
240+
bindings::drm_mm_insert_node_in_range(
241+
guard.0.get(),
242+
&mut mm_node.node,
243+
size,
244+
alignment,
245+
color,
246+
start,
247+
end,
248+
mode as u32,
249+
)
250+
})?;
251+
252+
mm_node.valid = true;
253+
254+
Ok(Pin::from(mm_node))
255+
}
256+
257+
/// Insert a node into the allocator at a fixed start address.
258+
///
259+
/// `node` is the user `T` type data to store into the node.
260+
pub fn reserve_node(
261+
&mut self,
262+
node: T,
263+
start: u64,
264+
size: u64,
265+
color: usize,
266+
) -> Result<Node<A, T>> {
267+
let mut mm_node = KBox::new(
268+
NodeData {
269+
// SAFETY: This C struct should be zero-initialized.
270+
node: unsafe { core::mem::zeroed() },
271+
valid: false,
272+
inner: node,
273+
mm: self.mm.clone(),
274+
_pin: PhantomPinned,
275+
},
276+
GFP_KERNEL,
277+
)?;
278+
279+
mm_node.node.start = start;
280+
mm_node.node.size = size;
281+
mm_node.node.color = color as crate::ffi::c_ulong;
282+
283+
let guard = self.mm.lock();
284+
// SAFETY: We hold the lock and all pointers are valid.
285+
to_result(unsafe { bindings::drm_mm_reserve_node(guard.0.get(), &mut mm_node.node) })?;
286+
287+
mm_node.valid = true;
288+
289+
Ok(Pin::from(mm_node))
290+
}
291+
292+
/// Operate on the inner user type `A`, taking the allocator lock
293+
pub fn with_inner<RetVal>(&self, cb: impl FnOnce(&mut A) -> RetVal) -> RetVal {
294+
let mut guard = self.mm.lock();
295+
cb(&mut guard.1)
296+
}
297+
}
298+
299+
impl<A: AllocInner<T>, T> Drop for MmInner<A, T> {
    fn drop(&mut self) {
        // SAFETY: If the MmInner is dropped then all nodes are gone (since they hold references),
        // so it is safe to tear down the allocator.
        unsafe {
            bindings::drm_mm_takedown(self.0.get());
        }
    }
}
308+
309+
// SAFETY: MmInner is safely Send if the AllocInner user type is Send.
unsafe impl<A: Send + AllocInner<T>, T> Send for MmInner<A, T> {}

rust/kernel/drm/mod.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ pub mod driver;
77
pub mod file;
88
pub mod gem;
99
pub mod ioctl;
10+
pub mod mm;
1011

1112
pub use self::device::Device;
1213
pub use self::driver::Driver;

0 commit comments

Comments
 (0)