Skip to content

Commit c1c9fb9

Browse files
hoshinolinajannau
authored and committed
drm/asahi: pgtable: Add dumper
Signed-off-by: Asahi Lina <lina@asahilina.net>
1 parent 5b9228a commit c1c9fb9

1 file changed

Lines changed: 133 additions & 16 deletions

File tree

drivers/gpu/drm/asahi/pgtable.rs

Lines changed: 133 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,7 @@ use core::mem::size_of;
1111
use core::ops::Range;
1212
use core::sync::atomic::{AtomicU64, Ordering};
1313

14-
use kernel::addr::PhysicalAddr;
15-
use kernel::{error::Result, page::Page, prelude::*};
14+
use kernel::{addr::PhysicalAddr, error::Result, page::Page, prelude::*, types::Owned};
1615

1716
use crate::debug::*;
1817
use crate::util::align;
@@ -166,6 +165,12 @@ impl Default for Prot {
166165
}
167166
}
168167

168+
pub(crate) struct DumpedPage {
169+
pub(crate) iova: u64,
170+
pub(crate) pte: u64,
171+
pub(crate) data: Option<Owned<Page>>,
172+
}
173+
169174
pub(crate) struct UatPageTable {
170175
ttb: PhysicalAddr,
171176
ttb_owned: bool,
@@ -224,11 +229,22 @@ impl UatPageTable {
224229
self.ttb
225230
}
226231

227-
fn with_pages<F>(&mut self, iova_range: Range<u64>, free: bool, mut cb: F) -> Result
232+
fn with_pages<F>(
233+
&mut self,
234+
iova_range: Range<u64>,
235+
alloc: bool,
236+
free: bool,
237+
mut cb: F,
238+
) -> Result
228239
where
229-
F: FnMut(u64, &[Pte]),
240+
F: FnMut(u64, &[Pte]) -> Result,
230241
{
231-
mod_pr_debug!("UATPageTable::with_pages: {:#x?} {}\n", iova_range, free);
242+
mod_pr_debug!(
243+
"UATPageTable::with_pages: {:#x?} alloc={} free={}\n",
244+
iova_range,
245+
alloc,
246+
free
247+
);
232248
if (iova_range.start | iova_range.end) & (UAT_PGMSK as u64) != 0 {
233249
pr_err!(
234250
"UATPageTable::with_pages: iova range not aligned: {:#x?}\n",
@@ -288,10 +304,12 @@ impl UatPageTable {
288304
pt_addr[level] =
289305
upt.with_pointer_into_page(upidx * PTE_SIZE, PTE_SIZE, |p| {
290306
let uptep = p as *const _ as *const Pte;
307+
// SAFETY: with_pointer_into_page() ensures the pointer is valid,
308+
// and our index is aligned so it is safe to deref as an AtomicU64.
291309
let upte = unsafe { &*uptep };
292310
let mut upte_val = upte.load(Ordering::Relaxed);
293311
// Allocate if requested
294-
if upte_val == 0 && !free {
312+
if upte_val == 0 && alloc {
295313
let pt_page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
296314
mod_pr_debug!("UATPageTable::with_pages: alloc PT at {:#x}\n", pt_page.phys());
297315
let pt_paddr = Page::into_phys(pt_page);
@@ -300,7 +318,7 @@ impl UatPageTable {
300318
}
301319
if upte_val & PTE_TYPE_BITS == PTE_TYPE_LEAF_TABLE {
302320
Ok(Some(upte_val & self.oas_mask & (!UAT_PGMSK as u64)))
303-
} else if upte_val == 0 {
321+
} else if upte_val == 0 || (!alloc && !free) {
304322
mod_pr_debug!("UATPageTable::with_pages: no level {}\n", level);
305323
Ok(None)
306324
} else {
@@ -334,8 +352,6 @@ impl UatPageTable {
334352
let max_count = UAT_NPTE - idx;
335353
let count = (((end - iova) >> UAT_PGBIT) as usize).min(max_count);
336354
let phys = pt_addr[0].unwrap();
337-
// SAFETY: Page table addresses are either allocated by us, or
338-
// firmware-managed and safe to borrow a struct page from.
339355
mod_pr_debug!(
340356
"UATPageTable::with_pages: leaf PT at {:#x} idx {:#x} count {:#x} iova {:#x}\n",
341357
phys,
@@ -351,7 +367,7 @@ impl UatPageTable {
351367
// SAFETY: We know this is a valid pointer to PTEs and the range is valid and
352368
// checked by with_pointer_into_page().
353369
let ptes = unsafe { core::slice::from_raw_parts(ptep, count) };
354-
cb(iova, ptes);
370+
cb(iova, ptes)?;
355371
Ok(())
356372
})?;
357373

@@ -362,12 +378,12 @@ impl UatPageTable {
362378
if free {
363379
for level in (0..UAT_LEVELS - 1).rev() {
364380
if let Some(phys) = pt_addr[level] {
365-
// SAFETY: Page tables for our VA ranges always come from Page::into_phys().
366381
mod_pr_debug!(
367382
"UATPageTable::with_pages: free level {} {:#x?}\n",
368383
level,
369384
phys
370385
);
386+
// SAFETY: Page tables for our VA ranges always come from Page::into_phys().
371387
unsafe { Page::from_phys(phys) };
372388
}
373389
}
@@ -378,7 +394,7 @@ impl UatPageTable {
378394

379395
pub(crate) fn alloc_pages(&mut self, iova_range: Range<u64>) -> Result {
380396
mod_pr_debug!("UATPageTable::alloc_pages: {:#x?}\n", iova_range);
381-
self.with_pages(iova_range, false, |_, _| {})
397+
self.with_pages(iova_range, true, false, |_, _| Ok(()))
382398
}
383399

384400
fn pte_bits(&self) -> u64 {
@@ -411,7 +427,7 @@ impl UatPageTable {
411427

412428
let pte_bits = self.pte_bits();
413429

414-
self.with_pages(iova_range, false, |iova, ptes| {
430+
self.with_pages(iova_range, true, false, |iova, ptes| {
415431
for (idx, pte) in ptes.iter().enumerate() {
416432
let ptev = pte.load(Ordering::Relaxed);
417433
if ptev != 0 {
@@ -426,6 +442,7 @@ impl UatPageTable {
426442
phys += UAT_PGSZ as PhysicalAddr;
427443
}
428444
}
445+
Ok(())
429446
})
430447
}
431448

@@ -435,7 +452,7 @@ impl UatPageTable {
435452
iova_range,
436453
prot
437454
);
438-
self.with_pages(iova_range, false, |iova, ptes| {
455+
self.with_pages(iova_range, true, false, |iova, ptes| {
439456
for (idx, pte) in ptes.iter().enumerate() {
440457
let ptev = pte.load(Ordering::Relaxed);
441458
if ptev & PTE_TYPE_BITS != PTE_TYPE_LEAF_TABLE {
@@ -448,12 +465,13 @@ impl UatPageTable {
448465
}
449466
pte.store((ptev & !UAT_PROT_BITS) | prot.as_pte(), Ordering::Relaxed);
450467
}
468+
Ok(())
451469
})
452470
}
453471

454472
pub(crate) fn unmap_pages(&mut self, iova_range: Range<u64>) -> Result {
455473
mod_pr_debug!("UATPageTable::unmap_pages: {:#x?}\n", iova_range);
456-
self.with_pages(iova_range, false, |iova, ptes| {
474+
self.with_pages(iova_range, false, false, |iova, ptes| {
457475
for (idx, pte) in ptes.iter().enumerate() {
458476
if pte.load(Ordering::Relaxed) & PTE_TYPE_LEAF_TABLE == 0 {
459477
pr_err!(
@@ -463,15 +481,113 @@ impl UatPageTable {
463481
}
464482
pte.store(0, Ordering::Relaxed);
465483
}
484+
Ok(())
466485
})
467486
}
487+
488+
pub(crate) fn dump_pages(&mut self, iova_range: Range<u64>) -> Result<KVVec<DumpedPage>> {
489+
let mut pages = KVVec::new();
490+
let oas_mask = self.oas_mask;
491+
let iova_base = self.va_range.start & !UAT_IASMSK;
492+
self.with_pages(iova_range, false, false, |iova, ptes| {
493+
let iova = iova | iova_base;
494+
for (idx, ppte) in ptes.iter().enumerate() {
495+
let pte = ppte.load(Ordering::Relaxed);
496+
if (pte & PTE_TYPE_LEAF_TABLE) != PTE_TYPE_LEAF_TABLE {
497+
continue;
498+
}
499+
let memattr = ((pte & UAT_MEMATTR_BITS) >> UAT_MEMATTR_SHIFT) as u8;
500+
501+
if !(memattr == MEMATTR_CACHED || memattr == MEMATTR_UNCACHED) {
502+
pages.push(
503+
DumpedPage {
504+
iova: iova + (idx * UAT_PGSZ) as u64,
505+
pte,
506+
data: None,
507+
},
508+
GFP_KERNEL,
509+
)?;
510+
continue;
511+
}
512+
let phys = pte & oas_mask & (!UAT_PGMSK as u64);
513+
// SAFETY: GPU pages are either firmware/preallocated pages
514+
// (which the kernel isn't concerned with and are either in
515+
// the page map or not, and if they aren't, borrow_phys()
516+
// will fail), or GPU page table pages (which we own),
517+
// or GEM buffer pages (which are locked while they are
518+
// mapped in the page table), so they should be safe to
519+
// borrow.
520+
//
521+
// This does trust the firmware not to have any weird
522+
// mappings in its own internal page tables, but since
523+
// those are managed by the uPPL which is privileged anyway,
524+
// this trust does not actually extend any trust boundary.
525+
let src_page = match unsafe { Page::borrow_phys(&phys) } {
526+
Some(page) => page,
527+
None => {
528+
pages.push(
529+
DumpedPage {
530+
iova: iova + (idx * UAT_PGSZ) as u64,
531+
pte,
532+
data: None,
533+
},
534+
GFP_KERNEL,
535+
)?;
536+
continue;
537+
}
538+
};
539+
let dst_page = Page::alloc_page(GFP_KERNEL)?;
540+
src_page.with_page_mapped(|psrc| -> Result {
541+
// SAFETY: This could technically still have a data race with the firmware
542+
// or other driver code (or even userspace with timestamp buffers), but while
543+
// the Rust language technically says this is UB, in the real world, using
544+
// atomic reads for this is guaranteed to never cause any harmful effects
545+
// other than possibly reading torn/unreliable data. At least on ARM64 anyway.
546+
//
547+
// (Yes, I checked with Rust people about this. ~~ Lina)
548+
//
549+
let src_items = unsafe {
550+
core::slice::from_raw_parts(
551+
psrc as *const AtomicU64,
552+
UAT_PGSZ / core::mem::size_of::<AtomicU64>(),
553+
)
554+
};
555+
dst_page.with_page_mapped(|pdst| -> Result {
556+
// SAFETY: We own the destination page, so it is safe to view its contents
557+
// as a u64 slice.
558+
let dst_items = unsafe {
559+
core::slice::from_raw_parts_mut(
560+
pdst as *mut u64,
561+
UAT_PGSZ / core::mem::size_of::<u64>(),
562+
)
563+
};
564+
for (si, di) in src_items.iter().zip(dst_items.iter_mut()) {
565+
*di = si.load(Ordering::Relaxed);
566+
}
567+
Ok(())
568+
})?;
569+
Ok(())
570+
})?;
571+
pages.push(
572+
DumpedPage {
573+
iova: iova + (idx * UAT_PGSZ) as u64,
574+
pte,
575+
data: Some(dst_page),
576+
},
577+
GFP_KERNEL,
578+
)?;
579+
}
580+
Ok(())
581+
})?;
582+
Ok(pages)
583+
}
468584
}
469585

470586
impl Drop for UatPageTable {
471587
fn drop(&mut self) {
472588
mod_pr_debug!("UATPageTable::drop range: {:#x?}\n", &self.va_range);
473589
if self
474-
.with_pages(self.va_range.clone(), true, |iova, ptes| {
590+
.with_pages(self.va_range.clone(), false, true, |iova, ptes| {
475591
for (idx, pte) in ptes.iter().enumerate() {
476592
if pte.load(Ordering::Relaxed) != 0 {
477593
pr_err!(
@@ -480,6 +596,7 @@ impl Drop for UatPageTable {
480596
);
481597
}
482598
}
599+
Ok(())
483600
})
484601
.is_err()
485602
{

0 commit comments

Comments (0)