Skip to content

Commit 9666534

Browse files
hoshinolinajannau
authored and committed
drm/asahi: pgtable: Add dumper
Signed-off-by: Asahi Lina <lina@asahilina.net>
1 parent f7c7cf7 commit 9666534

1 file changed

Lines changed: 134 additions & 16 deletions

File tree

drivers/gpu/drm/asahi/pgtable.rs

Lines changed: 134 additions & 16 deletions
Original file line number · Diff line number · Diff line change
@@ -11,8 +11,7 @@ use core::mem::size_of;
1111
use core::ops::Range;
1212
use core::sync::atomic::{AtomicU64, Ordering};
1313

14-
use kernel::addr::PhysicalAddr;
15-
use kernel::{error::Result, page::Page, prelude::*};
14+
use kernel::{addr::PhysicalAddr, error::Result, page::Page, prelude::*, types::Owned};
1615

1716
use crate::debug::*;
1817
use crate::util::align;
@@ -165,6 +164,12 @@ impl Default for Prot {
165164
}
166165
}
167166

167+
pub(crate) struct DumpedPage {
168+
pub(crate) iova: u64,
169+
pub(crate) pte: u64,
170+
pub(crate) data: Option<Owned<Page>>,
171+
}
172+
168173
pub(crate) struct UatPageTable {
169174
ttb: PhysicalAddr,
170175
ttb_owned: bool,
@@ -223,11 +228,22 @@ impl UatPageTable {
223228
self.ttb
224229
}
225230

226-
fn with_pages<F>(&mut self, iova_range: Range<u64>, free: bool, mut cb: F) -> Result
231+
fn with_pages<F>(
232+
&mut self,
233+
iova_range: Range<u64>,
234+
alloc: bool,
235+
free: bool,
236+
mut cb: F,
237+
) -> Result
227238
where
228-
F: FnMut(u64, &[Pte]),
239+
F: FnMut(u64, &[Pte]) -> Result,
229240
{
230-
mod_pr_debug!("UATPageTable::with_pages: {:#x?} {}\n", iova_range, free);
241+
mod_pr_debug!(
242+
"UATPageTable::with_pages: {:#x?} alloc={} free={}\n",
243+
iova_range,
244+
alloc,
245+
free
246+
);
231247
if (iova_range.start | iova_range.end) & (UAT_PGMSK as u64) != 0 {
232248
pr_err!(
233249
"UATPageTable::with_pages: iova range not aligned: {:#x?}\n",
@@ -287,10 +303,12 @@ impl UatPageTable {
287303
pt_addr[level] =
288304
upt.with_pointer_into_page(upidx * PTE_SIZE, PTE_SIZE, |p| {
289305
let uptep = p as *const _ as *const Pte;
306+
// SAFETY: with_pointer_into_page() ensures the pointer is valid,
307+
// and our index is aligned so it is safe to deref as an AtomicU64.
290308
let upte = unsafe { &*uptep };
291309
let mut upte_val = upte.load(Ordering::Relaxed);
292310
// Allocate if requested
293-
if upte_val == 0 && !free {
311+
if upte_val == 0 && alloc {
294312
let pt_page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
295313
mod_pr_debug!("UATPageTable::with_pages: alloc PT at {:#x}\n", pt_page.phys());
296314
let pt_paddr = Page::into_phys(pt_page);
@@ -299,7 +317,7 @@ impl UatPageTable {
299317
}
300318
if upte_val & PTE_TYPE_BITS == PTE_TYPE_LEAF_TABLE {
301319
Ok(Some(upte_val & self.oas_mask & (!UAT_PGMSK as u64)))
302-
} else if upte_val == 0 {
320+
} else if upte_val == 0 || (!alloc && !free) {
303321
mod_pr_debug!("UATPageTable::with_pages: no level {}\n", level);
304322
Ok(None)
305323
} else {
@@ -333,8 +351,6 @@ impl UatPageTable {
333351
let max_count = UAT_NPTE - idx;
334352
let count = (((end - iova) >> UAT_PGBIT) as usize).min(max_count);
335353
let phys = pt_addr[0].unwrap();
336-
// SAFETY: Page table addresses are either allocated by us, or
337-
// firmware-managed and safe to borrow a struct page from.
338354
mod_pr_debug!(
339355
"UATPageTable::with_pages: leaf PT at {:#x} idx {:#x} count {:#x} iova {:#x}\n",
340356
phys,
@@ -350,7 +366,7 @@ impl UatPageTable {
350366
// SAFETY: We know this is a valid pointer to PTEs and the range is valid and
351367
// checked by with_pointer_into_page().
352368
let ptes = unsafe { core::slice::from_raw_parts(ptep, count) };
353-
cb(iova, ptes);
369+
cb(iova, ptes)?;
354370
Ok(())
355371
})?;
356372

@@ -361,12 +377,12 @@ impl UatPageTable {
361377
if free {
362378
for level in (0..UAT_LEVELS - 1).rev() {
363379
if let Some(phys) = pt_addr[level] {
364-
// SAFETY: Page tables for our VA ranges always come from Page::into_phys().
365380
mod_pr_debug!(
366381
"UATPageTable::with_pages: free level {} {:#x?}\n",
367382
level,
368383
phys
369384
);
385+
// SAFETY: Page tables for our VA ranges always come from Page::into_phys().
370386
unsafe { Page::from_phys(phys) };
371387
}
372388
}
@@ -377,7 +393,7 @@ impl UatPageTable {
377393

378394
pub(crate) fn alloc_pages(&mut self, iova_range: Range<u64>) -> Result {
379395
mod_pr_debug!("UATPageTable::alloc_pages: {:#x?}\n", iova_range);
380-
self.with_pages(iova_range, false, |_, _| {})
396+
self.with_pages(iova_range, true, false, |_, _| Ok(()))
381397
}
382398

383399
pub(crate) fn map_pages(
@@ -398,7 +414,7 @@ impl UatPageTable {
398414
return Err(EINVAL);
399415
}
400416

401-
self.with_pages(iova_range, false, |iova, ptes| {
417+
self.with_pages(iova_range, true, false, |iova, ptes| {
402418
for (idx, pte) in ptes.iter().enumerate() {
403419
let ptev = pte.load(Ordering::Relaxed);
404420
if ptev != 0 {
@@ -416,6 +432,7 @@ impl UatPageTable {
416432
phys += UAT_PGSZ as PhysicalAddr;
417433
}
418434
}
435+
Ok(())
419436
})
420437
}
421438

@@ -425,7 +442,7 @@ impl UatPageTable {
425442
iova_range,
426443
prot
427444
);
428-
self.with_pages(iova_range, false, |iova, ptes| {
445+
self.with_pages(iova_range, true, false, |iova, ptes| {
429446
for (idx, pte) in ptes.iter().enumerate() {
430447
let ptev = pte.load(Ordering::Relaxed);
431448
if ptev & PTE_TYPE_BITS != PTE_TYPE_LEAF_TABLE {
@@ -438,12 +455,13 @@ impl UatPageTable {
438455
}
439456
pte.store((ptev & !UAT_PROT_BITS) | prot.as_pte(), Ordering::Relaxed);
440457
}
458+
Ok(())
441459
})
442460
}
443461

444462
pub(crate) fn unmap_pages(&mut self, iova_range: Range<u64>) -> Result {
445463
mod_pr_debug!("UATPageTable::unmap_pages: {:#x?}\n", iova_range);
446-
self.with_pages(iova_range, false, |iova, ptes| {
464+
self.with_pages(iova_range, false, false, |iova, ptes| {
447465
for (idx, pte) in ptes.iter().enumerate() {
448466
if pte.load(Ordering::Relaxed) & PTE_TYPE_LEAF_TABLE == 0 {
449467
pr_err!(
@@ -453,15 +471,114 @@ impl UatPageTable {
453471
}
454472
pte.store(0, Ordering::Relaxed);
455473
}
474+
Ok(())
456475
})
457476
}
477+
478+
pub(crate) fn dump_pages(&mut self, iova_range: Range<u64>) -> Result<KVVec<DumpedPage>> {
479+
let mut pages = KVVec::new();
480+
let oas_mask = self.oas_mask;
481+
let iova_base = self.va_range.start & !UAT_IASMSK;
482+
self.with_pages(iova_range, false, false, |iova, ptes| {
483+
let iova = iova | iova_base;
484+
for (idx, ppte) in ptes.iter().enumerate() {
485+
let pte = ppte.load(Ordering::Relaxed);
486+
if (pte & PTE_TYPE_LEAF_TABLE) != PTE_TYPE_LEAF_TABLE {
487+
continue;
488+
}
489+
let memattr = ((pte & UAT_MEMATTR_BITS) >> UAT_MEMATTR_SHIFT) as u8;
490+
491+
if !(memattr == MEMATTR_CACHED || memattr == MEMATTR_UNCACHED) {
492+
pages.push(
493+
DumpedPage {
494+
iova: iova + (idx * UAT_PGSZ) as u64,
495+
pte,
496+
data: None,
497+
},
498+
GFP_KERNEL,
499+
)?;
500+
continue;
501+
}
502+
let phys = pte & oas_mask & (!UAT_PGMSK as u64);
503+
// SAFETY: GPU pages are either firmware/preallocated pages
504+
// (which the kernel isn't concerned with and are either in
505+
// the page map or not, and if they aren't, borrow_phys()
506+
// will fail), or GPU page table pages (which we own),
507+
// or GEM buffer pages (which are locked while they are
508+
// mapped in the page table), so they should be safe to
509+
// borrow.
510+
//
511+
// This does trust the firmware not to have any weird
512+
// mappings in its own internal page tables, but since
513+
// those are managed by the uPPL which is privileged anyway,
514+
// this trust does not actually extend any trust boundary.
515+
let src_page = match unsafe { Page::borrow_phys(&phys) } {
516+
Some(page) => page,
517+
None => {
518+
pages.push(
519+
DumpedPage {
520+
iova: iova + (idx * UAT_PGSZ) as u64,
521+
pte,
522+
data: None,
523+
},
524+
GFP_KERNEL,
525+
)?;
526+
continue;
527+
}
528+
};
529+
let dst_page = Page::alloc_page(GFP_KERNEL)?;
530+
src_page.with_page_mapped(|psrc| -> Result {
531+
// SAFETY: This could technically still have a data
532+
// race with the firmware or other driver code (or
533+
// even userspace with timestamp buffers), but while
534+
// the Rust language technically says this is UB, in
535+
// the real world, using atomic reads for this is
536+
// guaranteed to never cause any harmful effects
537+
// other than possibly reading torn/unreliable data.
538+
// At least on ARM64 anyway.
539+
//
540+
// (Yes, I checked with Rust people about this. ~~ Lina)
541+
//
542+
let src_items = unsafe {
543+
core::slice::from_raw_parts(
544+
psrc as *const AtomicU64,
545+
UAT_PGSZ / core::mem::size_of::<AtomicU64>(),
546+
)
547+
};
548+
dst_page.with_page_mapped(|pdst| -> Result {
549+
let dst_items = unsafe {
550+
core::slice::from_raw_parts_mut(
551+
pdst as *mut u64,
552+
UAT_PGSZ / core::mem::size_of::<u64>(),
553+
)
554+
};
555+
for (si, di) in src_items.iter().zip(dst_items.iter_mut()) {
556+
*di = si.load(Ordering::Relaxed);
557+
}
558+
Ok(())
559+
})?;
560+
Ok(())
561+
})?;
562+
pages.push(
563+
DumpedPage {
564+
iova: iova + (idx * UAT_PGSZ) as u64,
565+
pte,
566+
data: Some(dst_page),
567+
},
568+
GFP_KERNEL,
569+
)?;
570+
}
571+
Ok(())
572+
})?;
573+
Ok(pages)
574+
}
458575
}
459576

460577
impl Drop for UatPageTable {
461578
fn drop(&mut self) {
462579
mod_pr_debug!("UATPageTable::drop range: {:#x?}\n", &self.va_range);
463580
if self
464-
.with_pages(self.va_range.clone(), true, |iova, ptes| {
581+
.with_pages(self.va_range.clone(), false, true, |iova, ptes| {
465582
for (idx, pte) in ptes.iter().enumerate() {
466583
if pte.load(Ordering::Relaxed) != 0 {
467584
pr_err!(
@@ -470,6 +587,7 @@ impl Drop for UatPageTable {
470587
);
471588
}
472589
}
590+
Ok(())
473591
})
474592
.is_err()
475593
{

0 commit comments

Comments
 (0)