Skip to content

Commit cfc62bc

Browse files
metaspace and jannau
authored and committed
rust: add dma pool and coherent allocator
Based on wedsonaf@02541e6
1 parent 1d00b9d commit cfc62bc

3 files changed

Lines changed: 249 additions & 0 deletions

File tree

rust/bindings/bindings_helper.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#include <linux/blkdev.h>
1818
#include <linux/delay.h>
1919
#include <linux/dma-mapping.h>
20+
#include <linux/dmapool.h>
2021
#include <linux/errname.h>
2122
#include <linux/ethtool.h>
2223
#include <linux/firmware.h>
@@ -29,6 +30,7 @@
2930
#include <linux/of.h>
3031
#include <linux/of_address.h>
3132
#include <linux/of_device.h>
33+
#include <linux/of_dma.h>
3234
#include <linux/pci.h>
3335
#include <linux/phy.h>
3436
#include <linux/platform_device.h>

rust/kernel/dma.rs

Lines changed: 246 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,246 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
3+
//! Direct memory access (DMA).
4+
//!
5+
//! C header: [`include/linux/dma-mapping.h`](../../../../include/linux/dma-mapping.h)
6+
7+
use crate::{
8+
alloc::flags, bindings, device::Device, error::code::*, error::Result, str::CStr, sync::Arc,
9+
types::ARef,
10+
};
11+
use core::marker::PhantomData;
12+
13+
/// An allocator of coherent DMA memory, e.g., the device-wide coherent
/// allocator or a DMA pool.
///
/// Implementations define what per-allocation data is needed to free an
/// allocation (`AllocationData`) and how to derive it from a longer-lived
/// source object (`DataSource`).
pub trait Allocator {
    /// Data stored alongside each allocation, needed to free it.
    type AllocationData;
    /// The source object from which `AllocationData` is derived.
    type DataSource;

    /// Frees an allocation previously handed out by this allocator.
    ///
    /// `cpu_addr` is the kernel virtual address, `dma_handle` the DMA address,
    /// `size` the size in bytes, and `alloc_data` the data stored when the
    /// allocation was made.
    fn free(cpu_addr: *mut (), dma_handle: u64, size: usize, alloc_data: &Self::AllocationData);

    /// Derives the per-allocation data from the given source.
    ///
    /// # Safety
    ///
    /// The returned data must remain valid to pass to [`Allocator::free`] for
    /// as long as the allocation it is stored with is live.
    unsafe fn allocation_data(data: &Self::DataSource) -> Self::AllocationData;
}
20+
21+
/// An [`Allocator`] backed by the device-wide coherent DMA API
/// (`dma_alloc_attrs`/`dma_free_attrs`).
pub struct CoherentAllocator;
22+
23+
impl Allocator for CoherentAllocator {
    // Freeing requires the owning device, so each allocation holds a
    // refcounted device reference.
    type AllocationData = ARef<Device>;
    type DataSource = ARef<Device>;

    fn free(cpu_addr: *mut (), dma_handle: u64, size: usize, dev: &ARef<Device>) {
        // SAFETY: `dev` is a valid device, and `cpu_addr`/`dma_handle`/`size`
        // describe a live allocation made by `dma_alloc_attrs` with attrs == 0
        // (see `try_alloc_coherent`).
        unsafe { bindings::dma_free_attrs(dev.as_raw(), size, cpu_addr as _, dma_handle, 0) };
    }

    unsafe fn allocation_data(data: &ARef<Device>) -> ARef<Device> {
        // Cloning the `ARef` bumps the device refcount, so the device outlives
        // the allocation.
        data.clone()
    }
}
35+
36+
pub fn try_alloc_coherent<T>(
37+
dev: ARef<Device>,
38+
count: usize,
39+
atomic: bool,
40+
) -> Result<CoherentAllocation<T, CoherentAllocator>> {
41+
let t_size = core::mem::size_of::<T>();
42+
let size = count.checked_mul(t_size).ok_or(ENOMEM)?;
43+
let mut dma_handle = 0;
44+
let ret = unsafe {
45+
bindings::dma_alloc_attrs(
46+
dev.as_raw(),
47+
size,
48+
&mut dma_handle,
49+
if atomic {
50+
bindings::GFP_ATOMIC
51+
} else {
52+
bindings::GFP_KERNEL
53+
},
54+
0,
55+
)
56+
};
57+
if ret.is_null() {
58+
Err(ENOMEM)
59+
} else {
60+
Ok(CoherentAllocation::new(ret as _, dma_handle, count, dev))
61+
}
62+
}
63+
64+
/// A DMA memory pool, wrapping the C `struct dma_pool`.
///
/// Each block handed out by the pool holds `count` objects of type `T`.
pub struct Pool<T> {
    // Owned pointer to the underlying C pool; destroyed in `Drop`.
    ptr: *mut bindings::dma_pool,
    // Holds a reference to the device the pool was created for.
    dev: ARef<Device>,
    // Number of `T` items per allocated block.
    count: usize,
    _p: PhantomData<T>,
}
70+
71+
impl<T> Pool<T> {
72+
/// Creates a new DMA memory pool.
73+
pub fn try_new(
74+
name: &CStr,
75+
dev: ARef<Device>,
76+
count: usize,
77+
align: usize,
78+
boundary: usize,
79+
) -> Result<Arc<Self>> {
80+
let t_size = core::mem::size_of::<T>();
81+
let size = count.checked_mul(t_size).ok_or(ENOMEM)?;
82+
let ptr = unsafe {
83+
bindings::dma_pool_create(name.as_char_ptr(), dev.as_raw(), size, align, boundary)
84+
};
85+
if ptr.is_null() {
86+
Err(ENOMEM)
87+
} else {
88+
Arc::new(
89+
Self {
90+
ptr,
91+
count,
92+
dev,
93+
_p: PhantomData,
94+
},
95+
flags::GFP_KERNEL,
96+
)
97+
.map_err(|e| e.into())
98+
}
99+
}
100+
101+
/// Allocates some memory from the pool.
102+
pub fn try_alloc(&self, atomic: bool) -> Result<CoherentAllocation<T, Self>> {
103+
let flags = if atomic {
104+
bindings::GFP_ATOMIC
105+
} else {
106+
bindings::GFP_KERNEL
107+
};
108+
109+
let mut dma_handle = 0;
110+
let ptr = unsafe { bindings::dma_pool_alloc(self.ptr, flags, &mut dma_handle) };
111+
if ptr.is_null() {
112+
Err(ENOMEM)
113+
} else {
114+
Ok(CoherentAllocation::new(
115+
ptr as _, dma_handle, self.count, self.ptr,
116+
))
117+
}
118+
}
119+
}
120+
121+
impl<T> Allocator for Pool<T> {
    // Freeing only needs the raw C pool pointer; the size is implicit in the
    // pool itself.
    type AllocationData = *mut bindings::dma_pool;
    type DataSource = Arc<Pool<T>>;

    fn free(cpu_addr: *mut (), dma_handle: u64, _size: usize, pool: &*mut bindings::dma_pool) {
        // SAFETY: `cpu_addr`/`dma_handle` describe a live block previously
        // returned by `dma_pool_alloc` on `*pool`.
        // NOTE(review): nothing here guarantees `*pool` is still alive — see
        // the note on `allocation_data` below.
        unsafe { bindings::dma_pool_free(*pool, cpu_addr as _, dma_handle) };
    }

    unsafe fn allocation_data(data: &Arc<Pool<T>>) -> *mut bindings::dma_pool {
        // NOTE(review): only the raw pool pointer is stored; this does not
        // take a reference on the `Arc`, so callers must keep the pool alive
        // for the allocation's whole lifetime — confirm against users.
        data.ptr
    }
}
133+
134+
impl<T> Drop for Pool<T> {
    fn drop(&mut self) {
        // SAFETY: `Pool` is always reference-counted and each allocation increments it, so all
        // allocations have been freed by the time this gets called.
        // NOTE(review): the claim above is not visible in this file —
        // `try_alloc` stores only the raw pool pointer in the allocation (see
        // `Allocator for Pool<T>`), so allocations do not appear to hold an
        // `Arc` reference. Verify that callers keep the `Arc<Pool>` alive
        // while allocations exist, or blocks could be freed into a destroyed
        // pool.
        unsafe { bindings::dma_pool_destroy(self.ptr) };
    }
}
141+
142+
/// A coherent DMA allocation of `count` objects of type `T`.
///
/// The memory is freed via [`Allocator::free`] when dropped, unless ownership
/// is released with [`CoherentAllocation::into_parts`].
pub struct CoherentAllocation<T, A: Allocator> {
    // Per-allocation data needed to free the memory (device or pool pointer).
    alloc_data: A::AllocationData,
    // DMA address of the allocation, as programmed into the device.
    pub dma_handle: u64,
    // Number of `T` items in the allocation.
    count: usize,
    // Kernel virtual address of the allocation.
    cpu_addr: *mut T,
}
148+
149+
impl<T, A: Allocator> CoherentAllocation<T, A> {
    /// Wraps raw allocation parts; `alloc_data` must be able to free them.
    fn new(cpu_addr: *mut T, dma_handle: u64, count: usize, alloc_data: A::AllocationData) -> Self {
        Self {
            dma_handle,
            count,
            cpu_addr,
            alloc_data,
        }
    }

    /// Reads the item at `index`, or returns `None` if out of bounds.
    pub fn read(&self, index: usize) -> Option<T> {
        if index >= self.count {
            return None;
        }

        let ptr = self.cpu_addr.wrapping_add(index);
        // SAFETY: We just checked that the index is within bounds.
        // NOTE(review): bounds alone don't establish that the memory holds a
        // valid `T` — the device may write arbitrary bytes here. Confirm `T`
        // is valid for any bit pattern at call sites.
        Some(unsafe { ptr.read() })
    }

    /// Reads the item at `index` with a volatile read (not elided or
    /// reordered by the compiler), or returns `None` if out of bounds.
    pub fn read_volatile(&self, index: usize) -> Option<T> {
        if index >= self.count {
            return None;
        }

        let ptr = self.cpu_addr.wrapping_add(index);
        // SAFETY: We just checked that the index is within bounds.
        // NOTE(review): same caveat as `read` about `T`'s validity for
        // device-written bit patterns.
        Some(unsafe { ptr.read_volatile() })
    }

    /// Writes `value` at `index`; returns `false` if out of bounds.
    pub fn write(&self, index: usize, value: &T) -> bool
    where
        T: Copy,
    {
        if index >= self.count {
            return false;
        }

        let ptr = self.cpu_addr.wrapping_add(index);
        // SAFETY: We just checked that the index is within bounds.
        unsafe { ptr.write(*value) };
        true
    }

    /// Replaces the item at `index` with `value`, returning the previous
    /// item, or `None` if out of bounds.
    pub fn read_write(&self, index: usize, value: T) -> Option<T> {
        if index >= self.count {
            return None;
        }

        let ptr = self.cpu_addr.wrapping_add(index);
        // SAFETY: We just checked that the index is within bounds.
        let ret = unsafe { ptr.read() };
        // SAFETY: We just checked that the index is within bounds.
        unsafe { ptr.write(value) };
        Some(ret)
    }

    /// Reassembles an allocation from parts previously obtained via
    /// [`CoherentAllocation::into_parts`].
    ///
    /// # Safety
    ///
    /// `ptr`, `dma_handle` and `count` must describe a live allocation made
    /// through `data`'s allocator. Ownership of the allocation transfers to
    /// the returned object, which will free it on drop.
    pub unsafe fn from_parts(
        data: &A::DataSource,
        ptr: usize,
        dma_handle: u64,
        count: usize,
    ) -> Self {
        Self {
            dma_handle,
            count,
            cpu_addr: ptr as _,
            // SAFETY: The safety requirements of the current function satisfy those of
            // `allocation_data`.
            alloc_data: unsafe { A::allocation_data(data) },
        }
    }

    /// Releases ownership, returning `(cpu_addr as usize, dma_handle)`.
    ///
    /// The memory is NOT freed; the caller becomes responsible for it, e.g.
    /// by later reconstructing the object with [`CoherentAllocation::from_parts`].
    pub fn into_parts(self) -> (usize, u64) {
        let ret = (self.cpu_addr as _, self.dma_handle);
        // Skip `Drop` so the memory is not freed here.
        core::mem::forget(self);
        ret
    }

    /// Returns a const pointer to the first item.
    pub fn first_ptr(&self) -> *const T {
        self.cpu_addr
    }

    /// Returns a mutable pointer to the first item.
    pub fn first_ptr_mut(&self) -> *mut T {
        self.cpu_addr
    }

    /// Returns the number of `T` items in the allocation.
    pub fn count(&self) -> usize {
        self.count
    }
}
240+
241+
impl<T, A: Allocator> Drop for CoherentAllocation<T, A> {
    fn drop(&mut self) {
        // Reconstruct the size in bytes. Cannot overflow for allocations made
        // via `try_alloc_coherent`/`Pool::try_new` (both use `checked_mul`);
        // `from_parts` callers must uphold the same invariant.
        let size = self.count * core::mem::size_of::<T>();
        A::free(self.cpu_addr as _, self.dma_handle, size, &self.alloc_data);
    }
}

rust/kernel/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@ pub mod delay;
4848
pub mod device;
4949
pub mod device_id;
5050
pub mod devres;
51+
pub mod dma;
5152
pub mod driver;
5253
#[cfg(CONFIG_DRM = "y")]
5354
pub mod drm;

0 commit comments

Comments
 (0)