Skip to content

Commit 1914d26

Browse files
committed
rust: folio: introduce basic support for folios
We implement the minimum needed in `read_folio` (part of `struct address_space_operations`) to support simple read-only file systems.

Signed-off-by: Wedson Almeida Filho <walmeida@microsoft.com>
1 parent f747b3a commit 1914d26

5 files changed

Lines changed: 223 additions & 0 deletions

File tree

rust/bindings/bindings_helper.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#include <linux/fs.h>
1212
#include <linux/fs_context.h>
1313
#include <linux/slab.h>
14+
#include <linux/pagemap.h>
1415
#include <linux/refcount.h>
1516
#include <linux/wait.h>
1617
#include <linux/sched.h>
@@ -26,3 +27,5 @@ const slab_flags_t BINDINGS_SLAB_MEM_SPREAD = SLAB_MEM_SPREAD;
2627
const unsigned long BINDINGS_SB_RDONLY = SB_RDONLY;
2728

2829
const loff_t BINDINGS_MAX_LFS_FILESIZE = MAX_LFS_FILESIZE;

/* Expose PAGE_SIZE (a macro) as a typed constant so a Rust binding is generated for it. */
const size_t BINDINGS_PAGE_SIZE = PAGE_SIZE;

rust/bindings/lib.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,3 +58,5 @@ pub const SLAB_MEM_SPREAD: slab_flags_t = BINDINGS_SLAB_MEM_SPREAD;
5858
pub const SB_RDONLY: core::ffi::c_ulong = BINDINGS_SB_RDONLY;
5959

6060
pub const MAX_LFS_FILESIZE: loff_t = BINDINGS_MAX_LFS_FILESIZE;

/// The page size, re-exported from the C side (`BINDINGS_PAGE_SIZE`, i.e. `PAGE_SIZE`).
pub const PAGE_SIZE: usize = BINDINGS_PAGE_SIZE;

rust/helpers.c

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,14 @@
2323
#include <kunit/test-bug.h>
2424
#include <linux/bug.h>
2525
#include <linux/build_bug.h>
26+
#include <linux/cacheflush.h>
2627
#include <linux/err.h>
2728
#include <linux/errname.h>
2829
#include <linux/fs.h>
30+
#include <linux/highmem.h>
31+
#include <linux/mm.h>
2932
#include <linux/mutex.h>
33+
#include <linux/pagemap.h>
3034
#include <linux/refcount.h>
3135
#include <linux/sched/signal.h>
3236
#include <linux/spinlock.h>
@@ -145,6 +149,60 @@ struct kunit *rust_helper_kunit_get_current_test(void)
145149
}
146150
EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test);
147151

152+
/*
 * Out-of-line wrappers so Rust code can call these folio/highmem routines.
 * NOTE(review): presumably the wrapped C versions are `static inline`
 * functions or macros, which cannot be bound directly — confirm against the
 * corresponding headers.
 */

/* Takes a reference on @folio. */
void rust_helper_folio_get(struct folio *folio)
{
	folio_get(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_get);

/* Releases a reference on @folio. */
void rust_helper_folio_put(struct folio *folio)
{
	folio_put(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_put);

/* Returns the byte position of @folio within its file. */
loff_t rust_helper_folio_pos(struct folio *folio)
{
	return folio_pos(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_pos);

/* Returns the size of @folio in bytes. */
size_t rust_helper_folio_size(struct folio *folio)
{
	return folio_size(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_size);

/* Marks @folio as up to date. */
void rust_helper_folio_mark_uptodate(struct folio *folio)
{
	folio_mark_uptodate(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_mark_uptodate);

/* Sets the error flag on @folio. */
void rust_helper_folio_set_error(struct folio *folio)
{
	folio_set_error(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_folio_set_error);

/* Flushes the data cache for the pages that make up @folio. */
void rust_helper_flush_dcache_folio(struct folio *folio)
{
	flush_dcache_folio(folio);
}
EXPORT_SYMBOL_GPL(rust_helper_flush_dcache_folio);

/*
 * Maps the page of @folio containing byte @offset and returns the mapped
 * address; must be paired with rust_helper_kunmap_local().
 */
void *rust_helper_kmap_local_folio(struct folio *folio, size_t offset)
{
	return kmap_local_folio(folio, offset);
}
EXPORT_SYMBOL_GPL(rust_helper_kmap_local_folio);

/* Unmaps an address previously returned by rust_helper_kmap_local_folio(). */
void rust_helper_kunmap_local(const void *vaddr)
{
	kunmap_local(vaddr);
}
EXPORT_SYMBOL_GPL(rust_helper_kunmap_local);
205+
148206
void rust_helper_i_uid_write(struct inode *inode, uid_t uid)
149207
{
150208
i_uid_write(inode, uid);
@@ -163,6 +221,12 @@ off_t rust_helper_i_size_read(const struct inode *inode)
163221
}
164222
EXPORT_SYMBOL_GPL(rust_helper_i_size_read);
165223

224+
/* Wraps mapping_set_large_folios() for Rust callers. */
void rust_helper_mapping_set_large_folios(struct address_space *mapping)
{
	mapping_set_large_folios(mapping);
}
EXPORT_SYMBOL_GPL(rust_helper_mapping_set_large_folios);
229+
166230
/*
167231
* `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can
168232
* use it in contexts where Rust expects a `usize` like slice (array) indices.

rust/kernel/folio.rs

Lines changed: 153 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,153 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
3+
//! Groups of contiguous pages, folios.
4+
//!
5+
//! C headers: [`include/linux/mm.h`](../../include/linux/mm.h)
6+
7+
use crate::error::{code::*, Result};
8+
use crate::types::{AlwaysRefCounted, Opaque, ScopeGuard};
9+
use core::{cmp::min, ptr};
10+
11+
/// Wraps the kernel's `struct folio`.
///
/// # Invariants
///
/// Instances of this type are always ref-counted, that is, a call to `folio_get` ensures that the
/// allocation remains valid at least until the matching call to `folio_put`.
// `repr(transparent)` so that a pointer to the C `struct folio` can be reinterpreted as a
// pointer to `Folio` (and vice versa), as done in `AlwaysRefCounted::dec_ref` and
// `LockedFolio::from_raw`.
#[repr(transparent)]
pub struct Folio(Opaque<bindings::folio>);
19+
20+
// SAFETY: The type invariants guarantee that `Folio` is always ref-counted.
unsafe impl AlwaysRefCounted for Folio {
    fn inc_ref(&self) {
        // SAFETY: The existence of a shared reference means that the refcount is nonzero.
        unsafe { bindings::folio_get(self.0.get()) };
    }

    unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
        // SAFETY: The safety requirements guarantee that the refcount is nonzero.
        // `Folio` is `repr(transparent)` over `Opaque<bindings::folio>`, so casting
        // `NonNull<Self>` to the C pointer type is sound.
        unsafe { bindings::folio_put(obj.cast().as_ptr()) }
    }
}
32+
33+
impl Folio {
34+
/// Returns the byte position of this folio in its file.
35+
pub fn pos(&self) -> i64 {
36+
// SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
37+
unsafe { bindings::folio_pos(self.0.get()) }
38+
}
39+
40+
/// Returns the byte size of this folio.
41+
pub fn size(&self) -> usize {
42+
// SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
43+
unsafe { bindings::folio_size(self.0.get()) }
44+
}
45+
46+
/// Flushes the data cache for the pages that make up the folio.
47+
pub fn flush_dcache(&self) {
48+
// SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
49+
unsafe { bindings::flush_dcache_folio(self.0.get()) }
50+
}
51+
}
52+
53+
/// A locked [`Folio`].
///
/// The folio is unlocked (via `folio_unlock`) when this value is dropped.
pub struct LockedFolio<'a>(&'a Folio);
55+
56+
impl LockedFolio<'_> {
    /// Creates a new locked folio from a raw pointer.
    ///
    /// # Safety
    ///
    /// Callers must ensure that the folio is valid and locked. Additionally, that the
    /// responsibility of unlocking is transferred to the new instance of [`LockedFolio`]. Lastly,
    /// that the returned [`LockedFolio`] doesn't outlive the refcount that keeps it alive.
    #[allow(dead_code)]
    pub(crate) unsafe fn from_raw(folio: *const bindings::folio) -> Self {
        // `Folio` is `repr(transparent)` over the C type, so this cast is representation-safe.
        let ptr = folio.cast();
        // SAFETY: The safety requirements ensure that `folio` (from which `ptr` is derived) is
        // valid and will remain valid while the `LockedFolio` instance lives.
        Self(unsafe { &*ptr })
    }

    /// Marks the folio as being up to date.
    pub fn mark_uptodate(&mut self) {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::folio_mark_uptodate(self.0 .0.get()) }
    }

    /// Sets the error flag on the folio.
    pub fn set_error(&mut self) {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::folio_set_error(self.0 .0.get()) }
    }

    /// Calls `cb` once per page overlapping `[offset, offset + len)`, passing a mutable slice of
    /// the mapped bytes of that page; each page is mapped only for the duration of its callback.
    ///
    /// Returns `EDOM` if `offset + len` overflows, `EINVAL` if the range exceeds the folio size,
    /// or the first error returned by `cb`.
    fn for_each_page(
        &mut self,
        offset: usize,
        len: usize,
        mut cb: impl FnMut(&mut [u8]) -> Result,
    ) -> Result {
        let mut remaining = len;
        let mut next_offset = offset;

        // Check that we don't overflow the folio.
        let end = offset.checked_add(len).ok_or(EDOM)?;
        if end > self.size() {
            return Err(EINVAL);
        }

        while remaining > 0 {
            // Offset within the current page; relies on PAGE_SIZE being a power of two.
            let page_offset = next_offset & (bindings::PAGE_SIZE - 1);
            let usable = min(remaining, bindings::PAGE_SIZE - page_offset);
            // SAFETY: The folio is valid because the shared reference implies a non-zero refcount;
            // `next_offset` is also guaranteed to be less than the folio size.
            let ptr = unsafe { bindings::kmap_local_folio(self.0 .0.get(), next_offset) };

            // SAFETY: `ptr` was just returned by the `kmap_local_folio` above.
            // The guard unmaps the page even if `cb` returns early with an error.
            let _guard = ScopeGuard::new(|| unsafe { bindings::kunmap_local(ptr) });

            // SAFETY: `kmap_local_folio` maps a whole page, so we know it's mapped for at least
            // `usable` bytes.
            let s = unsafe { core::slice::from_raw_parts_mut(ptr.cast::<u8>(), usable) };
            cb(s)?;

            next_offset += usable;
            remaining -= usable;
        }

        Ok(())
    }

    /// Writes the given slice into the folio, starting at byte `offset`.
    ///
    /// Returns an error if the destination range overflows or does not fit in the folio (see
    /// `for_each_page`).
    pub fn write(&mut self, offset: usize, data: &[u8]) -> Result {
        let mut remaining = data;

        self.for_each_page(offset, data.len(), |s| {
            s.copy_from_slice(&remaining[..s.len()]);
            remaining = &remaining[s.len()..];
            Ok(())
        })
    }

    /// Writes zeroes into the folio, covering `len` bytes starting at byte `offset`.
    ///
    /// Returns an error if the destination range overflows or does not fit in the folio (see
    /// `for_each_page`).
    pub fn zero_out(&mut self, offset: usize, len: usize) -> Result {
        self.for_each_page(offset, len, |s| {
            s.fill(0);
            Ok(())
        })
    }
}
140+
141+
// A `LockedFolio` derefs to the underlying `Folio`, so read-only accessors such as
// `Folio::size` and `Folio::pos` are available on it directly.
impl core::ops::Deref for LockedFolio<'_> {
    type Target = Folio;
    fn deref(&self) -> &Self::Target {
        self.0
    }
}
147+
148+
impl Drop for LockedFolio<'_> {
    // Releases the folio lock whose ownership was transferred in `from_raw`.
    fn drop(&mut self) {
        // SAFETY: The folio is valid because the shared reference implies a non-zero refcount.
        unsafe { bindings::folio_unlock(self.0 .0.get()) }
    }
}

rust/kernel/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@ extern crate self as kernel;
3434
mod allocator;
3535
mod build_assert;
3636
pub mod error;
37+
pub mod folio;
3738
pub mod fs;
3839
pub mod init;
3940
pub mod ioctl;

0 commit comments

Comments
 (0)