Skip to content

Commit a94e9ef

Browse files
hoshinolina and jannau
authored and committed
drm/asahi: pgtable: Add dumper
Signed-off-by: Asahi Lina <lina@asahilina.net>
1 parent fc316ed commit a94e9ef

1 file changed

Lines changed: 139 additions & 15 deletions

File tree

drivers/gpu/drm/asahi/pgtable.rs

Lines changed: 139 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -14,12 +14,16 @@ use core::sync::atomic::{
1414
Ordering, //
1515
};
1616

17-
use kernel::addr::PhysicalAddr;
1817
use kernel::{
18+
addr::PhysicalAddr,
1919
error::Result,
2020
page::Page,
2121
prelude::*, //
2222
};
23+
#[cfg(CONFIG_DEV_COREDUMP)]
24+
use kernel::{
25+
types::Owned,
26+
};
2327

2428
use crate::debug::*;
2529
use crate::util::align;
@@ -173,6 +177,13 @@ impl Default for Prot {
173177
}
174178
}
175179

180+
#[cfg(CONFIG_DEV_COREDUMP)]
181+
pub(crate) struct DumpedPage {
182+
pub(crate) iova: u64,
183+
pub(crate) pte: u64,
184+
pub(crate) data: Option<Owned<Page>>,
185+
}
186+
176187
pub(crate) struct UatPageTable {
177188
ttb: PhysicalAddr,
178189
ttb_owned: bool,
@@ -227,11 +238,22 @@ impl UatPageTable {
227238
self.ttb
228239
}
229240

230-
fn with_pages<F>(&mut self, iova_range: Range<u64>, free: bool, mut cb: F) -> Result
241+
fn with_pages<F>(
242+
&mut self,
243+
iova_range: Range<u64>,
244+
alloc: bool,
245+
free: bool,
246+
mut cb: F,
247+
) -> Result
231248
where
232-
F: FnMut(u64, &[Pte]),
249+
F: FnMut(u64, &[Pte]) -> Result,
233250
{
234-
mod_pr_debug!("UATPageTable::with_pages: {:#x?} {}\n", iova_range, free);
251+
mod_pr_debug!(
252+
"UATPageTable::with_pages: {:#x?} alloc={} free={}\n",
253+
iova_range,
254+
alloc,
255+
free
256+
);
235257
if (iova_range.start | iova_range.end) & (UAT_PGMSK as u64) != 0 {
236258
pr_err!(
237259
"UATPageTable::with_pages: iova range not aligned: {:#x?}\n",
@@ -291,10 +313,12 @@ impl UatPageTable {
291313
pt_addr[level] =
292314
upt.with_pointer_into_page(upidx * PTE_SIZE, PTE_SIZE, |p| {
293315
let uptep = p as *const _ as *const Pte;
316+
// SAFETY: with_pointer_into_page() ensures the pointer is valid,
317+
// and our index is aligned so it is safe to deref as an AtomicU64.
294318
let upte = unsafe { &*uptep };
295319
let mut upte_val = upte.load(Ordering::Relaxed);
296320
// Allocate if requested
297-
if upte_val == 0 && !free {
321+
if upte_val == 0 && alloc {
298322
let pt_page = Page::alloc_page(GFP_KERNEL | __GFP_ZERO)?;
299323
mod_pr_debug!("UATPageTable::with_pages: alloc PT at {:#x}\n", pt_page.phys());
300324
let pt_paddr = Page::into_phys(pt_page);
@@ -303,7 +327,7 @@ impl UatPageTable {
303327
}
304328
if upte_val & PTE_TYPE_BITS == PTE_TYPE_LEAF_TABLE {
305329
Ok(Some(upte_val & self.oas_mask & (!UAT_PGMSK as u64)))
306-
} else if upte_val == 0 {
330+
} else if upte_val == 0 || (!alloc && !free) {
307331
mod_pr_debug!("UATPageTable::with_pages: no level {}\n", level);
308332
Ok(None)
309333
} else {
@@ -337,8 +361,6 @@ impl UatPageTable {
337361
let max_count = UAT_NPTE - idx;
338362
let count = (((end - iova) >> UAT_PGBIT) as usize).min(max_count);
339363
let phys = pt_addr[0].unwrap();
340-
// SAFETY: Page table addresses are either allocated by us, or
341-
// firmware-managed and safe to borrow a struct page from.
342364
mod_pr_debug!(
343365
"UATPageTable::with_pages: leaf PT at {:#x} idx {:#x} count {:#x} iova {:#x}\n",
344366
phys,
@@ -354,7 +376,7 @@ impl UatPageTable {
354376
// SAFETY: We know this is a valid pointer to PTEs and the range is valid and
355377
// checked by with_pointer_into_page().
356378
let ptes = unsafe { core::slice::from_raw_parts(ptep, count) };
357-
cb(iova, ptes);
379+
cb(iova, ptes)?;
358380
Ok(())
359381
})?;
360382

@@ -365,12 +387,12 @@ impl UatPageTable {
365387
if free {
366388
for level in (0..UAT_LEVELS - 1).rev() {
367389
if let Some(phys) = pt_addr[level] {
368-
// SAFETY: Page tables for our VA ranges always come from Page::into_phys().
369390
mod_pr_debug!(
370391
"UATPageTable::with_pages: free level {} {:#x?}\n",
371392
level,
372393
phys
373394
);
395+
// SAFETY: Page tables for our VA ranges always come from Page::into_phys().
374396
unsafe { Page::from_phys(phys) };
375397
}
376398
}
@@ -381,7 +403,7 @@ impl UatPageTable {
381403

382404
pub(crate) fn alloc_pages(&mut self, iova_range: Range<u64>) -> Result {
383405
mod_pr_debug!("UATPageTable::alloc_pages: {:#x?}\n", iova_range);
384-
self.with_pages(iova_range, false, |_, _| {})
406+
self.with_pages(iova_range, true, false, |_, _| Ok(()))
385407
}
386408

387409
fn pte_bits(&self) -> u64 {
@@ -414,7 +436,7 @@ impl UatPageTable {
414436

415437
let pte_bits = self.pte_bits();
416438

417-
self.with_pages(iova_range, false, |iova, ptes| {
439+
self.with_pages(iova_range, true, false, |iova, ptes| {
418440
for (idx, pte) in ptes.iter().enumerate() {
419441
let ptev = pte.load(Ordering::Relaxed);
420442
if ptev != 0 {
@@ -429,6 +451,7 @@ impl UatPageTable {
429451
phys += UAT_PGSZ as PhysicalAddr;
430452
}
431453
}
454+
Ok(())
432455
})
433456
}
434457

@@ -438,7 +461,7 @@ impl UatPageTable {
438461
iova_range,
439462
prot
440463
);
441-
self.with_pages(iova_range, false, |iova, ptes| {
464+
self.with_pages(iova_range, true, false, |iova, ptes| {
442465
for (idx, pte) in ptes.iter().enumerate() {
443466
let ptev = pte.load(Ordering::Relaxed);
444467
if ptev & PTE_TYPE_BITS != PTE_TYPE_LEAF_TABLE {
@@ -451,12 +474,13 @@ impl UatPageTable {
451474
}
452475
pte.store((ptev & !UAT_PROT_BITS) | prot.as_pte(), Ordering::Relaxed);
453476
}
477+
Ok(())
454478
})
455479
}
456480

457481
pub(crate) fn unmap_pages(&mut self, iova_range: Range<u64>) -> Result {
458482
mod_pr_debug!("UATPageTable::unmap_pages: {:#x?}\n", iova_range);
459-
self.with_pages(iova_range, false, |iova, ptes| {
483+
self.with_pages(iova_range, false, false, |iova, ptes| {
460484
for (idx, pte) in ptes.iter().enumerate() {
461485
if pte.load(Ordering::Relaxed) & PTE_TYPE_LEAF_TABLE == 0 {
462486
pr_err!(
@@ -466,15 +490,114 @@ impl UatPageTable {
466490
}
467491
pte.store(0, Ordering::Relaxed);
468492
}
493+
Ok(())
469494
})
470495
}
496+
497+
#[cfg(CONFIG_DEV_COREDUMP)]
498+
pub(crate) fn dump_pages(&mut self, iova_range: Range<u64>) -> Result<KVVec<DumpedPage>> {
499+
let mut pages = KVVec::new();
500+
let oas_mask = self.oas_mask;
501+
let iova_base = self.va_range.start & !UAT_IASMSK;
502+
self.with_pages(iova_range, false, false, |iova, ptes| {
503+
let iova = iova | iova_base;
504+
for (idx, ppte) in ptes.iter().enumerate() {
505+
let pte = ppte.load(Ordering::Relaxed);
506+
if (pte & PTE_TYPE_LEAF_TABLE) != PTE_TYPE_LEAF_TABLE {
507+
continue;
508+
}
509+
let memattr = ((pte & UAT_MEMATTR_BITS) >> UAT_MEMATTR_SHIFT) as u8;
510+
511+
if !(memattr == MEMATTR_CACHED || memattr == MEMATTR_UNCACHED) {
512+
pages.push(
513+
DumpedPage {
514+
iova: iova + (idx * UAT_PGSZ) as u64,
515+
pte,
516+
data: None,
517+
},
518+
GFP_KERNEL,
519+
)?;
520+
continue;
521+
}
522+
let phys = pte & oas_mask & (!UAT_PGMSK as u64);
523+
// SAFETY: GPU pages are either firmware/preallocated pages
524+
// (which the kernel isn't concerned with and are either in
525+
// the page map or not, and if they aren't, borrow_phys()
526+
// will fail), or GPU page table pages (which we own),
527+
// or GEM buffer pages (which are locked while they are
528+
// mapped in the page table), so they should be safe to
529+
// borrow.
530+
//
531+
// This does trust the firmware not to have any weird
532+
// mappings in its own internal page tables, but since
533+
// those are managed by the uPPL which is privileged anyway,
534+
// this trust does not actually extend any trust boundary.
535+
let src_page = match unsafe { Page::borrow_phys(&phys) } {
536+
Some(page) => page,
537+
None => {
538+
pages.push(
539+
DumpedPage {
540+
iova: iova + (idx * UAT_PGSZ) as u64,
541+
pte,
542+
data: None,
543+
},
544+
GFP_KERNEL,
545+
)?;
546+
continue;
547+
}
548+
};
549+
let dst_page = Page::alloc_page(GFP_KERNEL)?;
550+
src_page.with_page_mapped(|psrc| -> Result {
551+
// SAFETY: This could technically still have a data race with the firmware
552+
// or other driver code (or even userspace with timestamp buffers), but while
553+
// the Rust language technically says this is UB, in the real world, using
554+
// atomic reads for this is guaranteed to never cause any harmful effects
555+
// other than possibly reading torn/unreliable data. At least on ARM64 anyway.
556+
//
557+
// (Yes, I checked with Rust people about this. ~~ Lina)
558+
//
559+
let src_items = unsafe {
560+
core::slice::from_raw_parts(
561+
psrc as *const AtomicU64,
562+
UAT_PGSZ / core::mem::size_of::<AtomicU64>(),
563+
)
564+
};
565+
dst_page.with_page_mapped(|pdst| -> Result {
566+
// SAFETY: We own the destination page, so it is safe to view its contents
567+
// as a u64 slice.
568+
let dst_items = unsafe {
569+
core::slice::from_raw_parts_mut(
570+
pdst as *mut u64,
571+
UAT_PGSZ / core::mem::size_of::<u64>(),
572+
)
573+
};
574+
for (si, di) in src_items.iter().zip(dst_items.iter_mut()) {
575+
*di = si.load(Ordering::Relaxed);
576+
}
577+
Ok(())
578+
})?;
579+
Ok(())
580+
})?;
581+
pages.push(
582+
DumpedPage {
583+
iova: iova + (idx * UAT_PGSZ) as u64,
584+
pte,
585+
data: Some(dst_page),
586+
},
587+
GFP_KERNEL,
588+
)?;
589+
}
590+
Ok(())
591+
})?;
592+
Ok(pages)
593+
}
471594
}
472595

473596
impl Drop for UatPageTable {
474597
fn drop(&mut self) {
475598
mod_pr_debug!("UATPageTable::drop range: {:#x?}\n", &self.va_range);
476599
if self
477-
.with_pages(self.va_range.clone(), true, |iova, ptes| {
600+
.with_pages(self.va_range.clone(), false, true, |iova, ptes| {
478601
for (idx, pte) in ptes.iter().enumerate() {
479602
if pte.load(Ordering::Relaxed) != 0 {
480603
pr_err!(
@@ -483,6 +606,7 @@ impl Drop for UatPageTable {
483606
);
484607
}
485608
}
609+
Ok(())
486610
})
487611
.is_err()
488612
{

0 commit comments

Comments (0)