Commit 57a5f45

lukaszlaguna authored and mwiniars committed
drm/xe/migrate: Add function to copy VRAM data in chunks
Introduce a new function to copy data between VRAM and sysmem objects. The existing xe_migrate_copy() is tailored for eviction and restore operations; it involves additional logic and operates on entire objects. xe_migrate_vram_copy_chunk() instead copies a chunk of data to or from a dedicated buffer object, which is essential for VF migration.

Signed-off-by: Lukasz Laguna <lukasz.laguna@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20251112132220.516975-22-michal.winiarski@intel.com
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
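For illustration, a minimal sketch of how a save path might drive the new helper; the function name vf_save_vram_chunk, the offsets, and the blocking wait are assumptions for this example, not part of the commit:

/* Hypothetical caller: save one PAGE_SIZE-aligned VRAM chunk to sysmem. */
static int vf_save_vram_chunk(struct xe_bo *vram_bo, u64 vram_offset,
			      struct xe_bo *sysmem_bo, u64 size)
{
	struct dma_fence *fence;

	/* Offsets and size must be PAGE_SIZE-aligned, per the asserts. */
	fence = xe_migrate_vram_copy_chunk(vram_bo, vram_offset,
					   sysmem_bo, 0, size,
					   XE_MIGRATE_COPY_TO_SRAM);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* The fence signals once the last copy batch has executed. */
	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}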
1 parent 274186f commit 57a5f45

2 files changed: 131 additions & 5 deletions


drivers/gpu/drm/xe/xe_migrate.c

Lines changed: 123 additions & 5 deletions
@@ -29,6 +29,7 @@
 #include "xe_lrc.h"
 #include "xe_map.h"
 #include "xe_mocs.h"
+#include "xe_printk.h"
 #include "xe_pt.h"
 #include "xe_res_cursor.h"
 #include "xe_sa.h"
@@ -1210,6 +1211,128 @@ struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
 	return migrate->q;
 }
 
+/**
+ * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object.
+ * @vram_bo: The VRAM buffer object.
+ * @vram_offset: The VRAM offset.
+ * @sysmem_bo: The sysmem buffer object.
+ * @sysmem_offset: The sysmem offset.
+ * @size: The size of VRAM chunk to copy.
+ * @dir: The direction of the copy operation.
+ *
+ * Copies a portion of a buffer object between VRAM and system memory.
+ * On Xe2 platforms that support flat CCS, VRAM data is decompressed when
+ * copying to system memory.
+ *
+ * Return: Pointer to a dma_fence representing the last copy batch, or
+ * an error pointer on failure. If there is a failure, any copy operation
+ * started by the function call has been synced.
+ */
+struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
+					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
+					     u64 size, enum xe_migrate_copy_dir dir)
+{
+	struct xe_device *xe = xe_bo_device(vram_bo);
+	struct xe_tile *tile = vram_bo->tile;
+	struct xe_gt *gt = tile->primary_gt;
+	struct xe_migrate *m = tile->migrate;
+	struct dma_fence *fence = NULL;
+	struct ttm_resource *vram = vram_bo->ttm.resource;
+	struct ttm_resource *sysmem = sysmem_bo->ttm.resource;
+	struct xe_res_cursor vram_it, sysmem_it;
+	u64 vram_L0_ofs, sysmem_L0_ofs;
+	u32 vram_L0_pt, sysmem_L0_pt;
+	u64 vram_L0, sysmem_L0;
+	bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM);
+	bool use_comp_pat = to_sysmem &&
+		GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe);
+	int pass = 0;
+	int err;
+
+	xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
+	xe_assert(xe, xe_bo_is_vram(vram_bo));
+	xe_assert(xe, !xe_bo_is_vram(sysmem_bo));
+	xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size));
+	xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size));
+
+	xe_res_first(vram, vram_offset, size, &vram_it);
+	xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it);
+
+	while (size) {
+		u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM;
+		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
+		struct xe_sched_job *job;
+		struct xe_bb *bb;
+		u32 update_idx;
+		bool usm = xe->info.has_usm;
+		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+		sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it);
+		vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0);
+
+		xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0);
+
+		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
+		batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0,
+					      &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts);
+
+		batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs,
+					      &sysmem_L0_pt, 0, avail_pts, avail_pts);
+		batch_size += EMIT_COPY_DW;
+
+		bb = xe_bb_new(gt, batch_size, usm);
+		if (IS_ERR(bb)) {
+			err = PTR_ERR(bb);
+			return ERR_PTR(err);
+		}
+
+		if (xe_migrate_allow_identity(vram_L0, &vram_it))
+			xe_res_next(&vram_it, vram_L0);
+		else
+			emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
+
+		emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
+
+		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+		update_idx = bb->len;
+
+		if (to_sysmem)
+			emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
+		else
+			emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
+
+		job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
+						 update_idx);
+		if (IS_ERR(job)) {
+			xe_bb_free(bb, NULL);
+			err = PTR_ERR(job);
+			return ERR_PTR(err);
+		}
+
+		xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
+
+		xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv,
+						     DMA_RESV_USAGE_BOOKKEEP));
+		xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
+						     DMA_RESV_USAGE_BOOKKEEP));
+
+		scoped_guard(mutex, &m->job_mutex) {
+			xe_sched_job_arm(job);
+			dma_fence_put(fence);
+			fence = dma_fence_get(&job->drm.s_fence->finished);
+			xe_sched_job_push(job);
+
+			dma_fence_put(m->fence);
+			m->fence = dma_fence_get(fence);
+		}
+
+		xe_bb_free(bb, fence);
+		size -= vram_L0;
+	}
+
+	return fence;
+}
+
 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
 				 u32 size, u32 pitch)
 {
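Each pass of the while (size) loop above submits one migration job and keeps only the newest fence, so a caller that splits a large object across several calls likewise needs only the last fence, assuming jobs on the single migration queue execute in submission order. A rough sketch under that assumption; the 64M chunk size, helper name, and non-empty bo are illustrative:

/* Hypothetical: stream a whole (non-empty) VRAM bo out in 64M chunks. */
static int save_whole_bo(struct xe_bo *vram_bo, struct xe_bo *sysmem_bo)
{
	u64 total = vram_bo->ttm.base.size;
	u64 offset = 0;
	struct dma_fence *fence = NULL;
	int err;

	while (offset < total) {
		u64 len = min_t(u64, SZ_64M, total - offset);

		dma_fence_put(fence);	/* older fences are superseded */
		fence = xe_migrate_vram_copy_chunk(vram_bo, offset,
						   sysmem_bo, offset, len,
						   XE_MIGRATE_COPY_TO_SRAM);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		offset += len;
	}

	/* Waiting on the final fence covers all earlier jobs (in-order queue). */
	err = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return err;
}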
@@ -1912,11 +2035,6 @@ static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
 	return true;
 }
 
-enum xe_migrate_copy_dir {
-	XE_MIGRATE_COPY_TO_VRAM,
-	XE_MIGRATE_COPY_TO_SRAM,
-};
-
 #define XE_CACHELINE_BYTES	64ull
 #define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)

drivers/gpu/drm/xe/xe_migrate.h

Lines changed: 8 additions & 0 deletions
@@ -28,6 +28,11 @@ struct xe_vma;
 
 enum xe_sriov_vf_ccs_rw_ctxs;
 
+enum xe_migrate_copy_dir {
+	XE_MIGRATE_COPY_TO_VRAM,
+	XE_MIGRATE_COPY_TO_SRAM,
+};
+
 /**
  * struct xe_migrate_pt_update_ops - Callbacks for the
  * xe_migrate_update_pgtables() function.
@@ -131,6 +136,9 @@ int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
 
 struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate);
 struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate);
+struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
+					     struct xe_bo *sysmem_bo, u64 sysmem_offset,
+					     u64 size, enum xe_migrate_copy_dir dir);
 int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
 			     unsigned long offset, void *buf, int len,
 			     int write);
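With the declaration and enum exported here, the restore direction is the mirrored call; a brief hypothetical sketch (helper name and error handling assumed, not part of this commit):

/* Hypothetical restore helper: copy a previously saved chunk back to VRAM. */
static int vf_restore_vram_chunk(struct xe_bo *vram_bo, u64 vram_offset,
				 struct xe_bo *sysmem_bo, u64 sysmem_offset,
				 u64 size)
{
	struct dma_fence *fence;

	fence = xe_migrate_vram_copy_chunk(vram_bo, vram_offset,
					   sysmem_bo, sysmem_offset, size,
					   XE_MIGRATE_COPY_TO_VRAM);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}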
