Skip to content

Commit 76ad741

Browse files
kwachowski authored and jlawrynowicz committed
accel/ivpu: Make command queue ID allocated on XArray
Use XArray for dynamic command queue ID allocations instead of fixed ones. This is required by upcoming changes to the UAPI that will allow user space to manage command queues, instead of having a predefined number of queues in a context. Signed-off-by: Karol Wachowski <karol.wachowski@intel.com> Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com> Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com> Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20241017145817.121590-8-jacek.lawrynowicz@linux.intel.com
1 parent ae7af7d commit 76ad741

4 files changed

Lines changed: 60 additions & 46 deletions

File tree

drivers/accel/ivpu/ivpu_drv.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,8 @@ static void file_priv_release(struct kref *ref)
104104
pm_runtime_get_sync(vdev->drm.dev);
105105
mutex_lock(&vdev->context_list_lock);
106106
file_priv_unbind(vdev, file_priv);
107+
drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
108+
xa_destroy(&file_priv->cmdq_xa);
107109
mutex_unlock(&vdev->context_list_lock);
108110
pm_runtime_put_autosuspend(vdev->drm.dev);
109111

@@ -259,6 +261,10 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
259261
file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
260262
file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
261263

264+
xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
265+
file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
266+
file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;
267+
262268
mutex_unlock(&vdev->context_list_lock);
263269
drm_dev_exit(idx);
264270

drivers/accel/ivpu/ivpu_drv.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,9 @@
5252
#define IVPU_NUM_PRIORITIES 4
5353
#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
5454

55+
#define IVPU_CMDQ_MIN_ID 1
56+
#define IVPU_CMDQ_MAX_ID 255
57+
5558
#define IVPU_PLATFORM_SILICON 0
5659
#define IVPU_PLATFORM_SIMICS 2
5760
#define IVPU_PLATFORM_FPGA 3
@@ -168,13 +171,15 @@ struct ivpu_file_priv {
168171
struct kref ref;
169172
struct ivpu_device *vdev;
170173
struct mutex lock; /* Protects cmdq */
171-
struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
174+
struct xarray cmdq_xa;
172175
struct ivpu_mmu_context ctx;
173176
struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
174177
struct list_head ms_instance_list;
175178
struct ivpu_bo *ms_info_bo;
176179
struct xa_limit job_limit;
177180
u32 job_id_next;
181+
struct xa_limit cmdq_limit;
182+
u32 cmdq_id_next;
178183
bool has_mmu_faults;
179184
bool bound;
180185
bool aborted;

drivers/accel/ivpu/ivpu_job.c

Lines changed: 46 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -89,17 +89,26 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
8989
goto err_free_cmdq;
9090
}
9191

92+
ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
93+
&file_priv->cmdq_id_next, GFP_KERNEL);
94+
if (ret < 0) {
95+
ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
96+
goto err_erase_db_xa;
97+
}
98+
9299
cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
93100
if (!cmdq->mem)
94-
goto err_erase_xa;
101+
goto err_erase_cmdq_xa;
95102

96103
ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
97104
if (ret)
98105
ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
99106

100107
return cmdq;
101108

102-
err_erase_xa:
109+
err_erase_cmdq_xa:
110+
xa_erase(&file_priv->cmdq_xa, cmdq->id);
111+
err_erase_db_xa:
103112
xa_erase(&vdev->db_xa, cmdq->db_id);
104113
err_free_cmdq:
105114
kfree(cmdq);
@@ -123,13 +132,13 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
123132
struct ivpu_device *vdev = file_priv->vdev;
124133
int ret;
125134

126-
ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
135+
ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
127136
task_pid_nr(current), engine,
128137
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
129138
if (ret)
130139
return ret;
131140

132-
ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
141+
ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
133142
priority);
134143
if (ret)
135144
return ret;
@@ -143,20 +152,21 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
143152
int ret;
144153

145154
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
146-
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
155+
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
147156
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
148157
else
149158
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
150159
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
151160

152161
if (!ret)
153-
ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);
162+
ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
163+
cmdq->db_id, cmdq->id, file_priv->ctx.id);
154164

155165
return ret;
156166
}
157167

158168
static int
159-
ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
169+
ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
160170
{
161171
struct ivpu_device *vdev = file_priv->vdev;
162172
struct vpu_job_queue_header *jobq_header;
@@ -172,7 +182,7 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
172182

173183
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
174184
jobq_header = &cmdq->jobq->header;
175-
jobq_header->engine_idx = engine;
185+
jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
176186
jobq_header->head = 0;
177187
jobq_header->tail = 0;
178188
if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
@@ -183,7 +193,7 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
183193
wmb(); /* Flush WC buffer for jobq->header */
184194

185195
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
186-
ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
196+
ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
187197
if (ret)
188198
return ret;
189199
}
@@ -210,9 +220,9 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
210220
cmdq->db_registered = false;
211221

212222
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
213-
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
223+
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
214224
if (!ret)
215-
ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
225+
ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
216226
}
217227

218228
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
@@ -222,51 +232,46 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
222232
return 0;
223233
}
224234

225-
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
226-
u8 priority)
235+
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
227236
{
228-
struct ivpu_cmdq *cmdq = file_priv->cmdq[priority];
237+
struct ivpu_cmdq *cmdq;
238+
unsigned long cmdq_id;
229239
int ret;
230240

231241
lockdep_assert_held(&file_priv->lock);
232242

243+
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
244+
if (cmdq->priority == priority)
245+
break;
246+
233247
if (!cmdq) {
234248
cmdq = ivpu_cmdq_alloc(file_priv);
235249
if (!cmdq)
236250
return NULL;
237-
file_priv->cmdq[priority] = cmdq;
251+
cmdq->priority = priority;
238252
}
239253

240-
ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
254+
ret = ivpu_cmdq_init(file_priv, cmdq, priority);
241255
if (ret)
242256
return NULL;
243257

244258
return cmdq;
245259
}
246260

247-
static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u8 priority)
261+
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
248262
{
249-
struct ivpu_cmdq *cmdq = file_priv->cmdq[priority];
263+
struct ivpu_cmdq *cmdq;
264+
unsigned long cmdq_id;
250265

251266
lockdep_assert_held(&file_priv->lock);
252267

253-
if (cmdq) {
254-
file_priv->cmdq[priority] = NULL;
268+
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
269+
xa_erase(&file_priv->cmdq_xa, cmdq_id);
255270
ivpu_cmdq_fini(file_priv, cmdq);
256271
ivpu_cmdq_free(file_priv, cmdq);
257272
}
258273
}
259274

260-
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
261-
{
262-
u8 priority;
263-
264-
lockdep_assert_held(&file_priv->lock);
265-
266-
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
267-
ivpu_cmdq_release_locked(file_priv, priority);
268-
}
269-
270275
/*
271276
* Mark the doorbell as unregistered
272277
* This function needs to be called when the VPU hardware is restarted
@@ -275,16 +280,13 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
275280
*/
276281
static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
277282
{
278-
u8 priority;
283+
struct ivpu_cmdq *cmdq;
284+
unsigned long cmdq_id;
279285

280286
mutex_lock(&file_priv->lock);
281287

282-
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
283-
struct ivpu_cmdq *cmdq = file_priv->cmdq[priority];
284-
285-
if (cmdq)
286-
cmdq->db_registered = false;
287-
}
288+
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
289+
cmdq->db_registered = false;
288290

289291
mutex_unlock(&file_priv->lock);
290292
}
@@ -304,12 +306,11 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
304306

305307
static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
306308
{
307-
u8 priority;
309+
struct ivpu_cmdq *cmdq;
310+
unsigned long cmdq_id;
308311

309-
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
310-
if (file_priv->cmdq[priority])
311-
ivpu_cmdq_fini(file_priv, file_priv->cmdq[priority]);
312-
}
312+
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
313+
ivpu_cmdq_fini(file_priv, cmdq);
313314
}
314315

315316
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
@@ -334,8 +335,8 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
334335

335336
/* Check if there is space left in job queue */
336337
if (next_entry == header->head) {
337-
ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
338-
job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
338+
ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
339+
job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
339340
return -EBUSY;
340341
}
341342

@@ -522,7 +523,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
522523

523524
mutex_lock(&file_priv->lock);
524525

525-
cmdq = ivpu_cmdq_acquire(file_priv, job->engine_idx, priority);
526+
cmdq = ivpu_cmdq_acquire(file_priv, priority);
526527
if (!cmdq) {
527528
ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
528529
file_priv->ctx.id, job->engine_idx, priority);

drivers/accel/ivpu/ivpu_job.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,10 @@ struct ivpu_cmdq {
2828
struct ivpu_bo *secondary_preempt_buf;
2929
struct ivpu_bo *mem;
3030
u32 entry_count;
31+
u32 id;
3132
u32 db_id;
3233
bool db_registered;
34+
u8 priority;
3335
};
3436

3537
/**

0 commit comments

Comments (0)