Skip to content

Commit e56bf25

Browse files
committed
drm/asahi: Refactor address types
VAs are u64; PAs and sizes are usize.

Signed-off-by: Asahi Lina <lina@asahilina.net>
1 parent 93b390c commit e56bf25

5 files changed

Lines changed: 46 additions & 52 deletions

File tree

drivers/gpu/drm/asahi/alloc.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -577,7 +577,7 @@ impl Allocator for SimpleAllocator {
577577
let iova = mapping.iova();
578578

579579
let ptr = unsafe { p.add(offset) };
580-
let gpu_ptr = (iova + offset) as u64;
580+
let gpu_ptr = iova + offset as u64;
581581

582582
mod_dev_dbg!(
583583
&self.dev,

drivers/gpu/drm/asahi/file.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ impl Drop for Vm {
3838
// When the user Vm is dropped, unmap everything in the user range
3939
if self
4040
.vm
41-
.unmap_range(mmu::IOVA_USER_BASE as u64, VM_USER_END)
41+
.unmap_range(mmu::IOVA_USER_BASE, VM_USER_END)
4242
.is_err()
4343
{
4444
pr_err!("Vm::Drop: vm.unmap_range() failed\n");

drivers/gpu/drm/asahi/gpu.rs

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -300,7 +300,7 @@ pub(crate) struct RtkitObject {
300300

301301
impl rtkit::Buffer for RtkitObject {
302302
fn iova(&self) -> Result<usize> {
303-
Ok(self.mapping.iova())
303+
Ok(self.mapping.iova() as usize)
304304
}
305305
fn buf(&mut self) -> Result<&mut [u8]> {
306306
let vmap = self.obj.vmap()?;
@@ -535,20 +535,20 @@ impl GpuManager::ver {
535535

536536
#[ver(V >= V13_0B4)]
537537
if let Some(base) = cfg.sram_base {
538-
let size = cfg.sram_size.unwrap() as usize;
538+
let size = cfg.sram_size.unwrap();
539539
let iova = mgr.as_mut().alloc_mmio_iova(size);
540540

541-
let mapping =
542-
mgr.uat
543-
.kernel_vm()
544-
.map_io(iova, base as usize, size, mmu::PROT_FW_SHARED_RW)?;
541+
let mapping = mgr
542+
.uat
543+
.kernel_vm()
544+
.map_io(iova, base, size, mmu::PROT_FW_SHARED_RW)?;
545545

546546
mgr.as_mut()
547547
.initdata_mut()
548548
.runtime_pointers
549549
.hwdata_b
550550
.with_mut(|raw, _| {
551-
raw.sgx_sram_ptr = U64(mapping.iova() as u64);
551+
raw.sgx_sram_ptr = U64(mapping.iova());
552552
});
553553

554554
mgr.as_mut().io_mappings_mut().try_push(mapping)?;

drivers/gpu/drm/asahi/hw/mod.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -307,9 +307,9 @@ pub(crate) struct HwConfig {
307307
/// Required MMIO mappings for this GPU/firmware.
308308
pub(crate) io_mappings: &'static [Option<IOMapping>],
309309
/// SRAM base
310-
pub(crate) sram_base: Option<u64>,
310+
pub(crate) sram_base: Option<usize>,
311311
/// SRAM size
312-
pub(crate) sram_size: Option<u64>,
312+
pub(crate) sram_size: Option<usize>,
313313
}
314314

315315
/// Dynamic (fetched from hardware/DT) configuration.

drivers/gpu/drm/asahi/mmu.rs

Lines changed: 35 additions & 41 deletions
Original file line numberDiff line numberDiff line change
@@ -66,15 +66,15 @@ pub(crate) const UAT_IAS: usize = 39;
6666
pub(crate) const UAT_IAS_KERN: usize = 36;
6767

6868
/// Lower/user base VA
69-
pub(crate) const IOVA_USER_BASE: usize = UAT_PGSZ;
69+
pub(crate) const IOVA_USER_BASE: u64 = UAT_PGSZ as u64;
7070
/// Lower/user top VA
71-
pub(crate) const IOVA_USER_TOP: usize = (1 << UAT_IAS) - 1;
71+
pub(crate) const IOVA_USER_TOP: u64 = 1 << (UAT_IAS as u64);
7272
/// Upper/kernel base VA
7373
// const IOVA_TTBR1_BASE: usize = 0xffffff8000000000;
7474
/// Driver-managed kernel base VA
75-
const IOVA_KERN_BASE: usize = 0xffffffa000000000;
75+
const IOVA_KERN_BASE: u64 = 0xffffffa000000000;
7676
/// Driver-managed kernel top VA
77-
const IOVA_KERN_TOP: usize = 0xffffffafffffffff;
77+
const IOVA_KERN_TOP: u64 = 0xffffffb000000000;
7878

7979
const TTBR_VALID: u64 = 0x1; // BIT(0)
8080
const TTBR_ASID_SHIFT: usize = 48;
@@ -178,8 +178,8 @@ const PAGETABLES_SIZE: usize = UAT_PGSZ;
178178
struct VmInner {
179179
dev: driver::AsahiDevRef,
180180
is_kernel: bool,
181-
min_va: usize,
182-
max_va: usize,
181+
min_va: u64,
182+
max_va: u64,
183183
page_table: AppleUAT<Uat>,
184184
mm: mm::Allocator<(), KernelMappingInner>,
185185
uat_inner: Arc<UatInner>,
@@ -229,7 +229,7 @@ impl gpuvm::DriverGpuVm for VmInner {
229229
op: &mut gpuvm::OpMap<Self>,
230230
ctx: &mut Self::StepContext,
231231
) -> Result {
232-
let mut iova = op.addr() as usize;
232+
let mut iova = op.addr();
233233
let mut left = op.range() as usize;
234234
let mut offset = op.offset() as usize;
235235

@@ -270,7 +270,7 @@ impl gpuvm::DriverGpuVm for VmInner {
270270
self.map_pages(iova, addr, UAT_PGSZ, len >> UAT_PGBIT, ctx.prot)?;
271271

272272
left -= len;
273-
iova += len;
273+
iova += len as u64;
274274
}
275275

276276
let gpuva = ctx.new_va.take().expect("Multiple step_map calls");
@@ -303,11 +303,7 @@ impl gpuvm::DriverGpuVm for VmInner {
303303

304304
mod_dev_dbg!(self.dev, "MMU: unmap: {:#x}:{:#x}\n", va.addr(), va.range());
305305

306-
self.unmap_pages(
307-
va.addr() as usize,
308-
UAT_PGSZ,
309-
(va.range() as usize) >> UAT_PGBIT,
310-
)?;
306+
self.unmap_pages(va.addr(), UAT_PGSZ, (va.range() >> UAT_PGBIT) as usize)?;
311307

312308
if let Some(asid) = self.slot() {
313309
mem::tlbi_range(asid as u8, va.addr() as usize, va.range() as usize);
@@ -362,11 +358,7 @@ impl gpuvm::DriverGpuVm for VmInner {
362358
orig_range
363359
);
364360

365-
self.unmap_pages(
366-
unmap_start as usize,
367-
UAT_PGSZ,
368-
(unmap_range as usize) >> UAT_PGBIT,
369-
)?;
361+
self.unmap_pages(unmap_start, UAT_PGSZ, (unmap_range >> UAT_PGBIT) as usize)?;
370362

371363
if op.unmap().unmap_and_unlink_va().is_none() {
372364
dev_err!(self.dev, "step_unmap: could not unlink gpuva");
@@ -419,8 +411,8 @@ impl VmInner {
419411
}
420412

421413
/// Map an IOVA to the shifted address the underlying io_pgtable uses.
422-
fn map_iova(&self, iova: usize, size: usize) -> Result<usize> {
423-
if iova < self.min_va || (iova + size - 1) > self.max_va {
414+
fn map_iova(&self, iova: u64, size: usize) -> Result<u64> {
415+
if iova < self.min_va || (iova + size as u64) > self.max_va {
424416
Err(EINVAL)
425417
} else if self.is_kernel {
426418
Ok(iova - self.min_va)
@@ -432,7 +424,7 @@ impl VmInner {
432424
/// Map a contiguous range of virtual->physical pages.
433425
fn map_pages(
434426
&mut self,
435-
mut iova: usize,
427+
mut iova: u64,
436428
mut paddr: usize,
437429
pgsize: usize,
438430
pgcount: usize,
@@ -441,24 +433,26 @@ impl VmInner {
441433
let mut left = pgcount;
442434
while left > 0 {
443435
let mapped_iova = self.map_iova(iova, pgsize * left)?;
444-
let mapped = self
445-
.page_table
446-
.map_pages(mapped_iova, paddr, pgsize, left, prot)?;
436+
let mapped =
437+
self.page_table
438+
.map_pages(mapped_iova as usize, paddr, pgsize, left, prot)?;
447439
assert!(mapped <= left * pgsize);
448440

449441
left -= mapped / pgsize;
450442
paddr += mapped;
451-
iova += mapped;
443+
iova += mapped as u64;
452444
}
453445
Ok(pgcount * pgsize)
454446
}
455447

456448
/// Unmap a contiguous range of pages.
457-
fn unmap_pages(&mut self, mut iova: usize, pgsize: usize, pgcount: usize) -> Result<usize> {
449+
fn unmap_pages(&mut self, mut iova: u64, pgsize: usize, pgcount: usize) -> Result<usize> {
458450
let mut left = pgcount;
459451
while left > 0 {
460452
let mapped_iova = self.map_iova(iova, pgsize * left)?;
461-
let mut unmapped = self.page_table.unmap_pages(mapped_iova, pgsize, left);
453+
let mut unmapped = self
454+
.page_table
455+
.unmap_pages(mapped_iova as usize, pgsize, left);
462456
if unmapped == 0 {
463457
dev_err!(
464458
self.dev,
@@ -471,23 +465,23 @@ impl VmInner {
471465
assert!(unmapped <= left * pgsize);
472466

473467
left -= unmapped / pgsize;
474-
iova += unmapped;
468+
iova += unmapped as u64;
475469
}
476470

477471
Ok(pgcount * pgsize)
478472
}
479473

480474
/// Map an `mm::Node` representing a mapping in VA space.
481475
fn map_node(&mut self, node: &mm::Node<(), KernelMappingInner>, prot: u32) -> Result {
482-
let mut iova = node.start() as usize;
476+
let mut iova = node.start();
483477
let guard = node.bo.as_ref().ok_or(EINVAL)?.inner().sgt.lock();
484478
let sgt = guard.as_ref().ok_or(EINVAL)?;
485479

486480
for range in sgt.iter() {
487481
let addr = range.dma_address();
488482
let len = range.dma_len();
489483

490-
if (addr | len | iova) & UAT_PGMSK != 0 {
484+
if (addr | len | iova as usize) & UAT_PGMSK != 0 {
491485
dev_err!(
492486
self.dev,
493487
"MMU: KernelMapping {:#x}:{:#x} -> {:#x} is not page-aligned\n",
@@ -508,7 +502,7 @@ impl VmInner {
508502

509503
self.map_pages(iova, addr, UAT_PGSZ, len >> UAT_PGBIT, prot)?;
510504

511-
iova += len;
505+
iova += len as u64;
512506
}
513507
Ok(())
514508
}
@@ -589,8 +583,8 @@ pub(crate) struct KernelMapping(mm::Node<(), KernelMappingInner>);
589583

590584
impl KernelMapping {
591585
/// Returns the IOVA base of this mapping
592-
pub(crate) fn iova(&self) -> usize {
593-
self.0.start() as usize
586+
pub(crate) fn iova(&self) -> u64 {
587+
self.0.start()
594588
}
595589

596590
/// Returns the size of this mapping in bytes
@@ -700,13 +694,13 @@ impl KernelMapping {
700694
// Lock this flush slot, and write the range to it
701695
let flush = self.0.uat_inner.lock_flush(flush_slot);
702696
let pages = self.size() >> UAT_PGBIT;
703-
flush.begin_flush(self.iova() as u64, self.size() as u64);
697+
flush.begin_flush(self.iova(), self.size() as u64);
704698
if pages >= 0x10000 {
705699
dev_err!(owner.dev, "MMU: Flush too big ({:#x} pages))\n", pages);
706700
}
707701

708702
let cmd = fw::channels::FwCtlMsg {
709-
addr: fw::types::U64(self.iova() as u64),
703+
addr: fw::types::U64(self.iova()),
710704
unk_8: 0,
711705
slot: flush_slot,
712706
page_count: pages as u16,
@@ -784,7 +778,7 @@ impl Drop for KernelMapping {
784778
}
785779

786780
if let Some(asid) = owner.slot() {
787-
mem::tlbi_range(asid as u8, self.iova(), self.size());
781+
mem::tlbi_range(asid as u8, self.iova() as usize, self.size());
788782
mod_dev_dbg!(
789783
owner.dev,
790784
"MMU: flush range: asid={:#x} start={:#x} len={:#x}\n",
@@ -1023,7 +1017,7 @@ impl Vm {
10231017
IOVA_USER_TOP
10241018
};
10251019

1026-
let mm = mm::Allocator::new(min_va as u64, (max_va - min_va + 1) as u64, ())?;
1020+
let mm = mm::Allocator::new(min_va, max_va - min_va, ())?;
10271021

10281022
let binding = Arc::pin_init(Mutex::new_named(
10291023
VmBinding {
@@ -1042,8 +1036,8 @@ impl Vm {
10421036
c_str!("Asahi::GpuVm"),
10431037
dev,
10441038
&*(dummy_obj.gem),
1045-
min_va as u64,
1046-
(max_va - min_va + 1) as u64,
1039+
min_va,
1040+
max_va - min_va,
10471041
0,
10481042
0,
10491043
init!(VmInner {
@@ -1181,7 +1175,7 @@ impl Vm {
11811175

11821176
ctx.vm_bo = Some(vm_bo);
11831177

1184-
if (addr | size | offset) as usize & UAT_PGMSK != 0 {
1178+
if (addr | size | offset) & (UAT_PGMSK as u64) != 0 {
11851179
dev_err!(
11861180
inner.dev,
11871181
"MMU: Map step {:#x} [{:#x}] -> {:#x} is not page-aligned\n",
@@ -1245,7 +1239,7 @@ impl Vm {
12451239
0,
12461240
)?;
12471241

1248-
inner.map_pages(iova as usize, phys, UAT_PGSZ, size >> UAT_PGBIT, prot)?;
1242+
inner.map_pages(iova, phys, UAT_PGSZ, size >> UAT_PGBIT, prot)?;
12491243

12501244
Ok(KernelMapping(node))
12511245
}

0 commit comments

Comments (0)