Skip to content

Commit aaeef7a

Browse files
committed
drm/xe/migrate: rework size restrictions for sram pte emit
We allow the input size to not be aligned to PAGE_SIZE, which leads to various bugs in build_pt_update_batch_sram() on systems where PAGE_SIZE > 4K. For example, if ptes is exactly one gpu_page_size, then the chunk size is rounded down to zero. The simplest fix looks to be forcing PAGE_SIZE-aligned inputs. Signed-off-by: Matthew Auld <matthew.auld@intel.com> Cc: Matthew Brost <matthew.brost@intel.com> Reviewed-by: Matthew Brost <matthew.brost@intel.com> Link: https://lore.kernel.org/r/20251022163836.191405-3-matthew.auld@intel.com
1 parent 3c767f7 commit aaeef7a

1 file changed

Lines changed: 8 additions & 5 deletions

File tree

drivers/gpu/drm/xe/xe_migrate.c

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1798,6 +1798,8 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
17981798
u32 ptes;
17991799
int i = 0;
18001800

1801+
xe_tile_assert(m->tile, PAGE_ALIGNED(size));
1802+
18011803
ptes = DIV_ROUND_UP(size, gpu_page_size);
18021804
while (ptes) {
18031805
u32 chunk = min(MAX_PTE_PER_SDI, ptes);
@@ -1811,12 +1813,13 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
18111813
ptes -= chunk;
18121814

18131815
while (chunk--) {
1814-
u64 addr = sram_addr[i].addr & ~(gpu_page_size - 1);
1815-
u64 pte, orig_addr = addr;
1816+
u64 addr = sram_addr[i].addr;
1817+
u64 pte;
18161818

18171819
xe_tile_assert(m->tile, sram_addr[i].proto ==
18181820
DRM_INTERCONNECT_SYSTEM);
18191821
xe_tile_assert(m->tile, addr);
1822+
xe_tile_assert(m->tile, PAGE_ALIGNED(addr));
18201823

18211824
again:
18221825
pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
@@ -1827,7 +1830,7 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
18271830

18281831
if (gpu_page_size < PAGE_SIZE) {
18291832
addr += XE_PAGE_SIZE;
1830-
if (orig_addr + PAGE_SIZE != addr) {
1833+
if (!PAGE_ALIGNED(addr)) {
18311834
chunk--;
18321835
goto again;
18331836
}
@@ -1918,10 +1921,10 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
19181921

19191922
if (use_pde)
19201923
build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes,
1921-
sram_addr, len + sram_offset, 1);
1924+
sram_addr, npages << PAGE_SHIFT, 1);
19221925
else
19231926
build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
1924-
sram_addr, len + sram_offset, 0);
1927+
sram_addr, npages << PAGE_SHIFT, 0);
19251928

19261929
if (dir == XE_MIGRATE_COPY_TO_VRAM) {
19271930
if (use_pde)

0 commit comments

Comments (0)