// SPDX-License-Identifier: GPL-2.0

//! IOMMU page table management.
//!
//! C header: [`include/linux/io-pgtable.h`](srctree/include/linux/io-pgtable.h)

use core::{marker::PhantomData, ptr::NonNull};

use crate::{
    alloc,
    bindings,
    device::{Bound, Device},
    devres::Devres,
    error::to_result,
    io::PhysAddr,
    prelude::*,
};

use bindings::io_pgtable_fmt;

/// Protection flags used with IOMMU mappings.
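///
/// These are bit flags and may be combined with bitwise OR. For example, a readable, writable,
/// cacheable mapping would use (illustrative sketch; assumes this module is reachable as
/// `kernel::io_pgtable::prot`):
///
/// ```ignore
/// use kernel::io_pgtable::prot;
///
/// let p = prot::READ | prot::WRITE | prot::CACHE;
/// ```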
pub mod prot {
    /// Read access.
    pub const READ: u32 = bindings::IOMMU_READ;
    /// Write access.
    pub const WRITE: u32 = bindings::IOMMU_WRITE;
    /// Request cache coherency.
    pub const CACHE: u32 = bindings::IOMMU_CACHE;
    /// Request no-execute permission.
    pub const NOEXEC: u32 = bindings::IOMMU_NOEXEC;
    /// MMIO peripheral mapping.
    pub const MMIO: u32 = bindings::IOMMU_MMIO;
    /// Privileged mapping.
    pub const PRIVILEGED: u32 = bindings::IOMMU_PRIV;
}

/// Represents a requested `io_pgtable` configuration.
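///
/// # Examples
///
/// A minimal sketch of a configuration for 4 KiB pages with 40-bit input and output address
/// spaces (the values are illustrative, not a recommendation; assumes this module is reachable
/// as `kernel::io_pgtable`):
///
/// ```ignore
/// use kernel::io_pgtable::Config;
///
/// let config = Config {
///     quirks: 0,
///     pgsize_bitmap: 0x1000, // 4 KiB pages only.
///     ias: 40,
///     oas: 40,
///     coherent_walk: true,
/// };
/// ```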
pub struct Config {
    /// Quirk bitmask (type-specific).
    pub quirks: usize,
    /// Valid page sizes, as a bitmask of powers of two.
    pub pgsize_bitmap: usize,
    /// Input address space size in bits.
    pub ias: u32,
    /// Output address space size in bits.
    pub oas: u32,
    /// Whether the IOMMU uses coherent accesses for page table walks.
    pub coherent_walk: bool,
}

/// An io page table using a specific format.
///
/// # Invariants
///
/// `ptr` references a valid io page table.
pub struct IoPageTable<F: IoPageTableFmt> {
    ptr: NonNull<bindings::io_pgtable_ops>,
    _marker: PhantomData<F>,
}

// SAFETY: `struct io_pgtable_ops` is not restricted to a single thread.
unsafe impl<F: IoPageTableFmt> Send for IoPageTable<F> {}
// SAFETY: `struct io_pgtable_ops` may be accessed concurrently.
unsafe impl<F: IoPageTableFmt> Sync for IoPageTable<F> {}

/// The format used by this page table.
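///
/// Formats are selected at the type level; for example, [`ARM64LPAES1`] below selects the
/// `ARM_64_LPAE_S1` format.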
pub trait IoPageTableFmt: 'static {
    /// The value representing this format.
    const FORMAT: io_pgtable_fmt;
}

impl<F: IoPageTableFmt> IoPageTable<F> {
    /// Create a new `IoPageTable` as a device resource.
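    ///
    /// # Examples
    ///
    /// A sketch of typical use during probe (assumes a bound device `dev`, a prepared `config`,
    /// and the [`ARM64LPAES1`] format; error handling and driver plumbing are elided):
    ///
    /// ```ignore
    /// let pgt = KBox::pin_init(
    ///     IoPageTable::<ARM64LPAES1>::new(dev, config),
    ///     GFP_KERNEL,
    /// )?;
    /// ```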
    #[inline]
    pub fn new(
        dev: &Device<Bound>,
        config: Config,
    ) -> impl PinInit<Devres<IoPageTable<F>>, Error> + '_ {
        // SAFETY: `Devres` ensures that the value is dropped during device unbind, so the
        // `IoPageTable` cannot outlive the device, as `new_raw` requires.
        Devres::new(dev, unsafe { Self::new_raw(dev, config) })
    }

    /// Create a new `IoPageTable`.
    ///
    /// # Safety
    ///
    /// If successful, then the returned `IoPageTable` must be dropped before the device is
    /// unbound.
    #[inline]
    pub unsafe fn new_raw(dev: &Device<Bound>, config: Config) -> Result<IoPageTable<F>> {
        let mut raw_cfg = bindings::io_pgtable_cfg {
            quirks: config.quirks,
            pgsize_bitmap: config.pgsize_bitmap,
            ias: config.ias,
            oas: config.oas,
            coherent_walk: config.coherent_walk,
            tlb: &raw const NOOP_FLUSH_OPS,
            iommu_dev: dev.as_raw(),
            // SAFETY: All zeroes is a valid value for `struct io_pgtable_cfg`.
            ..unsafe { core::mem::zeroed() }
        };

        // SAFETY:
        // * The `raw_cfg` pointer is valid for the duration of this call.
        // * The provided `NOOP_FLUSH_OPS` contains valid function pointers that accept a null
        //   pointer as cookie.
        // * The caller ensures that the io pgtable does not outlive the device.
        let ops = unsafe {
            bindings::alloc_io_pgtable_ops(F::FORMAT, &mut raw_cfg, core::ptr::null_mut())
        };

        // INVARIANT: If `ops` is non-null, it points to a valid io page table.
        Ok(IoPageTable {
            ptr: NonNull::new(ops).ok_or(ENOMEM)?,
            _marker: PhantomData,
        })
    }

    /// Obtain a raw pointer to the underlying `struct io_pgtable_ops`.
    #[inline]
    pub fn raw_ops(&self) -> *mut bindings::io_pgtable_ops {
        self.ptr.as_ptr()
    }

    /// Obtain a raw pointer to the underlying `struct io_pgtable`.
    #[inline]
    pub fn raw_pgtable(&self) -> *mut bindings::io_pgtable {
        // SAFETY: The `io_pgtable_ops` of an io pgtable is always the `ops` field of a
        // `struct io_pgtable`.
        unsafe { kernel::container_of!(self.raw_ops(), bindings::io_pgtable, ops) }
    }

    /// Obtain a raw pointer to the underlying `struct io_pgtable_cfg`.
    #[inline]
    pub fn raw_cfg(&self) -> *mut bindings::io_pgtable_cfg {
        // SAFETY: The `raw_pgtable()` method returns a valid pointer.
        unsafe { &raw mut (*self.raw_pgtable()).cfg }
    }

    /// Map a physically contiguous range of pages of the same size.
    ///
    /// Even if successful, this operation may not map the entire range. In that case, only a
    /// prefix of the range is mapped, and the returned integer is its length in bytes; the
    /// caller will usually call `map_pages` again for the remaining range.
    ///
    /// The returned [`Result`] indicates whether an error was encountered while mapping pages.
    /// Note that a non-zero length may be returned even on error, in which case the caller will
    /// usually [unmap the relevant pages](Self::unmap_pages).
    ///
    /// The caller must flush the TLB before using the page table to access the newly created
    /// mapping.
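    ///
    /// # Examples
    ///
    /// A sketch of a mapping loop that retries until the whole range is mapped (assumes the
    /// safety requirements below hold for the entire range; `pgt`, `iova`, `paddr`, `pgsize`
    /// and `pgcount` are illustrative):
    ///
    /// ```ignore
    /// let mut done = 0;
    /// while done < pgsize * pgcount {
    ///     // SAFETY: Upheld by the surrounding driver for the whole range.
    ///     let (mapped, ret) = unsafe {
    ///         pgt.map_pages(
    ///             iova + done,
    ///             paddr + done as PhysAddr,
    ///             pgsize,
    ///             (pgsize * pgcount - done) / pgsize,
    ///             prot::READ | prot::WRITE,
    ///             GFP_KERNEL,
    ///         )
    ///     };
    ///     ret?;
    ///     done += mapped;
    /// }
    /// // The caller must still flush the TLB before the mapping is used.
    /// ```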
    ///
    /// # Safety
    ///
    /// * No other io-pgtable operation may access the range `iova..iova + pgsize * pgcount`
    ///   while this `map_pages` operation executes.
    /// * This page table must not contain any mapping that overlaps with the mapping created by
    ///   this call.
    /// * If this page table is live, then the caller must ensure that it's okay to access the
    ///   physical address being mapped for the duration in which it is mapped.
    #[inline]
    pub unsafe fn map_pages(
        &self,
        iova: usize,
        paddr: PhysAddr,
        pgsize: usize,
        pgcount: usize,
        prot: u32,
        flags: alloc::Flags,
    ) -> (usize, Result) {
        let mut mapped: usize = 0;

        // SAFETY: The `map_pages` function in `io_pgtable_ops` is never null.
        let map_pages = unsafe { (*self.raw_ops()).map_pages.unwrap_unchecked() };

        // SAFETY: The safety requirements of this method are sufficient to call `map_pages`.
        let ret = to_result(unsafe {
            (map_pages)(
                self.raw_ops(),
                iova,
                paddr,
                pgsize,
                pgcount,
                prot as i32,
                flags.as_raw(),
                &mut mapped,
            )
        });

        (mapped, ret)
    }

    /// Unmap a range of virtually contiguous pages of the same size.
    ///
    /// This may not unmap the entire range; the returned integer is the length in bytes of the
    /// prefix that was unmapped.
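    ///
    /// # Examples
    ///
    /// A sketch of tearing down a previously created mapping (assumes the same `iova`, `pgsize`
    /// and `pgcount` that were mapped, and that the safety requirements below hold):
    ///
    /// ```ignore
    /// // SAFETY: The range was mapped earlier and no other operation is accessing it.
    /// let unmapped = unsafe { pgt.unmap_pages(iova, pgsize, pgcount) };
    /// assert_eq!(unmapped, pgsize * pgcount);
    /// ```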
    ///
    /// # Safety
    ///
    /// * No other io-pgtable operation may access the range `iova..iova + pgsize * pgcount`
    ///   while this `unmap_pages` operation executes.
    /// * This page table must contain one or more consecutive mappings starting at `iova` whose
    ///   total size is `pgcount * pgsize`.
    #[inline]
    #[must_use]
    pub unsafe fn unmap_pages(&self, iova: usize, pgsize: usize, pgcount: usize) -> usize {
        // SAFETY: The `unmap_pages` function in `io_pgtable_ops` is never null.
        let unmap_pages = unsafe { (*self.raw_ops()).unmap_pages.unwrap_unchecked() };

        // SAFETY: The safety requirements of this method are sufficient to call `unmap_pages`.
        unsafe { (unmap_pages)(self.raw_ops(), iova, pgsize, pgcount, core::ptr::null_mut()) }
    }
}

// For the initial users of these Rust bindings, the GPU firmware manages the IOTLB and performs
// all required invalidations using a range. There is no need for it to get ARM-style
// invalidation instructions from the page table code.
//
// Support for flushing the TLB with ARM-style invalidation instructions may be added in the
// future.
static NOOP_FLUSH_OPS: bindings::iommu_flush_ops = bindings::iommu_flush_ops {
    tlb_flush_all: Some(rust_tlb_flush_all_noop),
    tlb_flush_walk: Some(rust_tlb_flush_walk_noop),
    tlb_add_page: None,
};

#[no_mangle]
extern "C" fn rust_tlb_flush_all_noop(_cookie: *mut core::ffi::c_void) {}

#[no_mangle]
extern "C" fn rust_tlb_flush_walk_noop(
    _iova: usize,
    _size: usize,
    _granule: usize,
    _cookie: *mut core::ffi::c_void,
) {
}

impl<F: IoPageTableFmt> Drop for IoPageTable<F> {
    fn drop(&mut self) {
        // SAFETY: By the type invariants, `self.raw_ops()` points to a valid io page table, and
        // callers of methods such as `Self::ttbr()` have promised that the device is no longer
        // using the page table when this destructor runs.
        unsafe { bindings::free_io_pgtable_ops(self.raw_ops()) };
    }
}

/// The `ARM_64_LPAE_S1` page table format.
pub enum ARM64LPAES1 {}

impl IoPageTableFmt for ARM64LPAES1 {
    const FORMAT: io_pgtable_fmt = bindings::io_pgtable_fmt_ARM_64_LPAE_S1 as io_pgtable_fmt;
}

impl IoPageTable<ARM64LPAES1> {
    /// Access the `ttbr` field of the configuration.
    ///
    /// This is the physical address of the page table, which may be passed to the device that
    /// needs to use it.
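    ///
    /// # Examples
    ///
    /// A sketch of programming the table base and memory attributes into a device (the `hw`
    /// handle and the `write_ttbr`/`write_mair` helpers are hypothetical):
    ///
    /// ```ignore
    /// // SAFETY: The driver quiesces the device before the page table is dropped.
    /// let ttbr = unsafe { pgt.ttbr() };
    /// hw.write_ttbr(ttbr);
    /// hw.write_mair(pgt.mair());
    /// ```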
    ///
    /// # Safety
    ///
    /// The caller must ensure that the device stops using the page table before dropping it.
    #[inline]
    pub unsafe fn ttbr(&self) -> u64 {
        // SAFETY: `arm_lpae_s1_cfg` is the right cfg type for `ARM64LPAES1`.
        unsafe { (*self.raw_cfg()).__bindgen_anon_1.arm_lpae_s1_cfg.ttbr }
    }

    /// Access the `mair` field of the configuration.
    #[inline]
    pub fn mair(&self) -> u64 {
        // SAFETY: `arm_lpae_s1_cfg` is the right cfg type for `ARM64LPAES1`.
        unsafe { (*self.raw_cfg()).__bindgen_anon_1.arm_lpae_s1_cfg.mair }
    }
}