Skip to content

Commit 490fd93

Browse files
committed
Merge tag 'drm-misc-next-2025-11-14-1' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for v6.19: UAPI Changes: - Add sysfs entries, coredump support and uevents to QAIC. - Add fdinfo memory statistics to ivpu. Cross-subsystem Changes: - Handle stub fence initialization during module init. - Stop using system_wq in scheduler and drivers. Core Changes: - Documentation updates to ttm, vblank. - Add EDID quirk for sharp panel. - Use drm_crtc_vblank_(crtc,waitqueue) more in core and drivers. Driver Changes: - Small updates and fixes to panfrost, amdxdna, vmwgfx, ast, ivpu. - Handle preemption in amdxdna. - Add PM support to qaic. - Huge refactor of sun4i's layer code to decouple plane code from output and improve support for DE33. - Add larger page and compression support to nouveau. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Link: https://patch.msgid.link/1ad3ea69-d029-4a21-8b3d-6b264b1b2a30@linux.intel.com
2 parents 727bf2d + ca25834 commit 490fd93

72 files changed

Lines changed: 2312 additions & 702 deletions

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
What: /sys/bus/pci/drivers/qaic/XXXX:XX:XX.X/accel/accel<minor_nr>/dbc<N>_state
2+
Date: October 2025
3+
KernelVersion: 6.19
4+
Contact: Jeff Hugo <jeff.hugo@oss.qualcomm.com>
5+
Description: Represents the current state of a DMA Bridge channel (DBC). Below are the possible
6+
states:
7+
8+
=================== ==========================================================
9+
IDLE (0) DBC is free and can be activated
10+
ASSIGNED (1) DBC is activated and a workload is running on device
11+
BEFORE_SHUTDOWN (2) Sub-system associated with this workload has crashed and
12+
it will shutdown soon
13+
AFTER_SHUTDOWN (3) Sub-system associated with this workload has crashed and
14+
it has shutdown
15+
BEFORE_POWER_UP (4) Sub-system associated with this workload is shutdown and
16+
it will be powered up soon
17+
AFTER_POWER_UP (5) Sub-system associated with this workload is now powered up
18+
=================== ==========================================================
19+
Users: Any userspace applications or clients interested in DBC state.

Documentation/accel/qaic/aic100.rst

Lines changed: 23 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -487,15 +487,36 @@ one user crashes, the fallout of that should be limited to that workload and not
487487
impact other workloads. SSR accomplishes this.
488488

489489
If a particular workload crashes, QSM notifies the host via the QAIC_SSR MHI
490-
channel. This notification identifies the workload by it's assigned DBC. A
491-
multi-stage recovery process is then used to cleanup both sides, and get the
490+
channel. This notification identifies the workload by its assigned DBC. A
491+
multi-stage recovery process is then used to cleanup both sides, and gets the
492492
DBC/NSPs into a working state.
493493

494494
When SSR occurs, any state in the workload is lost. Any inputs that were in
495495
process, or queued but not yet serviced, are lost. The loaded artifacts will
496496
remain in on-card DDR, but the host will need to re-activate the workload if
497497
it desires to recover the workload.
498498

499+
When SSR occurs for a specific NSP, the assigned DBC goes through the
500+
following state transitions in order:
501+
502+
DBC_STATE_BEFORE_SHUTDOWN
503+
Indicates that the affected NSP was found in an unrecoverable error
504+
condition.
505+
DBC_STATE_AFTER_SHUTDOWN
506+
Indicates that the NSP is under reset.
507+
DBC_STATE_BEFORE_POWER_UP
508+
Indicates that the NSP's debug information has been collected, and is
509+
ready to be collected by the host (if desired). At that stage the NSP
510+
is restarted by QSM.
511+
DBC_STATE_AFTER_POWER_UP
512+
Indicates that the NSP has been restarted and is fully operational and
513+
in idle state.
514+
515+
SSR also has an optional crashdump collection feature. If enabled, the host can
516+
collect the memory dump for the crashed NSP and expose it to user space via
517+
the dev_coredump subsystem. The host can also decline the crashdump collection
518+
request from the device.
519+
499520
Reliability, Accessibility, Serviceability (RAS)
500521
================================================
501522

Documentation/devicetree/bindings/display/renesas,rzg2l-du.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,9 @@ properties:
2525
- enum:
2626
- renesas,r9a07g054-du # RZ/V2L
2727
- const: renesas,r9a07g044-du # RZ/G2L fallback
28+
- items:
29+
- const: renesas,r9a09g056-du # RZ/V2N
30+
- const: renesas,r9a09g057-du # RZ/V2H(P) fallback
2831

2932
reg:
3033
maxItems: 1

drivers/accel/amdxdna/aie2_ctx.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,6 @@ aie2_sched_notify(struct amdxdna_sched_job *job)
189189

190190
up(&job->hwctx->priv->job_sem);
191191
job->job_done = true;
192-
dma_fence_put(fence);
193192
mmput_async(job->mm);
194193
aie2_job_put(job);
195194
}
@@ -691,17 +690,19 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
691690
xdna = hwctx->client->xdna;
692691

693692
XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
694-
drm_sched_entity_destroy(&hwctx->priv->entity);
695-
696693
aie2_hwctx_wait_for_idle(hwctx);
697694

698695
/* Request fw to destroy hwctx and cancel the rest pending requests */
699696
aie2_release_resource(hwctx);
700697

698+
mutex_unlock(&xdna->dev_lock);
699+
drm_sched_entity_destroy(&hwctx->priv->entity);
700+
701701
/* Wait for all submitted jobs to be completed or canceled */
702702
wait_event(hwctx->priv->job_free_wq,
703703
atomic64_read(&hwctx->job_submit_cnt) ==
704704
atomic64_read(&hwctx->job_free_cnt));
705+
mutex_lock(&xdna->dev_lock);
705706

706707
drm_sched_fini(&hwctx->priv->sched);
707708
aie2_ctx_syncobj_destroy(hwctx);

drivers/accel/amdxdna/aie2_message.c

Lines changed: 95 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -210,6 +210,14 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct
210210
hwctx->fw_ctx_id = resp.context_id;
211211
WARN_ONCE(hwctx->fw_ctx_id == -1, "Unexpected context id");
212212

213+
if (ndev->force_preempt_enabled) {
214+
ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FORCE_PREEMPT, &hwctx->fw_ctx_id);
215+
if (ret) {
216+
XDNA_ERR(xdna, "failed to enable force preempt %d", ret);
217+
return ret;
218+
}
219+
}
220+
213221
cq_pair = &resp.cq_pair[0];
214222
x2i.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.head_addr);
215223
x2i.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.tail_addr);
@@ -601,6 +609,11 @@ aie2_cmdlist_fill_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
601609
return 0;
602610
}
603611

612+
/*
 * Stub for command-list slot types this message protocol cannot encode.
 * Always rejects the request; all parameters are intentionally ignored.
 */
static int aie2_cmdlist_unsupp(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
{
	return -EOPNOTSUPP;
}
616+
604617
static u32 aie2_get_chain_msg_op(u32 cmd_op)
605618
{
606619
switch (cmd_op) {
@@ -621,6 +634,8 @@ static struct aie2_exec_msg_ops legacy_exec_message_ops = {
621634
.init_chain_req = aie2_init_exec_chain_req,
622635
.fill_cf_slot = aie2_cmdlist_fill_cf,
623636
.fill_dpu_slot = aie2_cmdlist_fill_dpu,
637+
.fill_preempt_slot = aie2_cmdlist_unsupp,
638+
.fill_elf_slot = aie2_cmdlist_unsupp,
624639
.get_chain_msg_op = aie2_get_chain_msg_op,
625640
};
626641

@@ -680,6 +695,74 @@ aie2_cmdlist_fill_npu_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *si
680695
return 0;
681696
}
682697

698+
static int
699+
aie2_cmdlist_fill_npu_preempt(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
700+
{
701+
struct cmd_chain_slot_npu *npu_slot = slot;
702+
struct amdxdna_cmd_preempt_data *pd;
703+
u32 cmd_len;
704+
u32 arg_sz;
705+
706+
pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
707+
arg_sz = cmd_len - sizeof(*pd);
708+
if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
709+
return -EINVAL;
710+
711+
if (*size < sizeof(*npu_slot) + arg_sz)
712+
return -EINVAL;
713+
714+
npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
715+
if (npu_slot->cu_idx == INVALID_CU_IDX)
716+
return -EINVAL;
717+
718+
memset(npu_slot, 0, sizeof(*npu_slot));
719+
npu_slot->type = EXEC_NPU_TYPE_PREEMPT;
720+
npu_slot->inst_buf_addr = pd->inst_buf;
721+
npu_slot->save_buf_addr = pd->save_buf;
722+
npu_slot->restore_buf_addr = pd->restore_buf;
723+
npu_slot->inst_size = pd->inst_size;
724+
npu_slot->save_size = pd->save_size;
725+
npu_slot->restore_size = pd->restore_size;
726+
npu_slot->inst_prop_cnt = pd->inst_prop_cnt;
727+
npu_slot->arg_cnt = arg_sz / sizeof(u32);
728+
memcpy(npu_slot->args, pd->prop_args, arg_sz);
729+
730+
*size = sizeof(*npu_slot) + arg_sz;
731+
return 0;
732+
}
733+
734+
static int
735+
aie2_cmdlist_fill_npu_elf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
736+
{
737+
struct cmd_chain_slot_npu *npu_slot = slot;
738+
struct amdxdna_cmd_preempt_data *pd;
739+
u32 cmd_len;
740+
u32 arg_sz;
741+
742+
pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
743+
arg_sz = cmd_len - sizeof(*pd);
744+
if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
745+
return -EINVAL;
746+
747+
if (*size < sizeof(*npu_slot) + arg_sz)
748+
return -EINVAL;
749+
750+
memset(npu_slot, 0, sizeof(*npu_slot));
751+
npu_slot->type = EXEC_NPU_TYPE_ELF;
752+
npu_slot->inst_buf_addr = pd->inst_buf;
753+
npu_slot->save_buf_addr = pd->save_buf;
754+
npu_slot->restore_buf_addr = pd->restore_buf;
755+
npu_slot->inst_size = pd->inst_size;
756+
npu_slot->save_size = pd->save_size;
757+
npu_slot->restore_size = pd->restore_size;
758+
npu_slot->inst_prop_cnt = pd->inst_prop_cnt;
759+
npu_slot->arg_cnt = 1;
760+
npu_slot->args[0] = AIE2_EXEC_BUFFER_KERNEL_OP_TXN;
761+
762+
*size = struct_size(npu_slot, args, npu_slot->arg_cnt);
763+
return 0;
764+
}
765+
683766
static u32 aie2_get_npu_chain_msg_op(u32 cmd_op)
684767
{
685768
return MSG_OP_CHAIN_EXEC_NPU;
@@ -691,6 +774,8 @@ static struct aie2_exec_msg_ops npu_exec_message_ops = {
691774
.init_chain_req = aie2_init_npu_chain_req,
692775
.fill_cf_slot = aie2_cmdlist_fill_npu_cf,
693776
.fill_dpu_slot = aie2_cmdlist_fill_npu_dpu,
777+
.fill_preempt_slot = aie2_cmdlist_fill_npu_preempt,
778+
.fill_elf_slot = aie2_cmdlist_fill_npu_elf,
694779
.get_chain_msg_op = aie2_get_npu_chain_msg_op,
695780
};
696781

@@ -749,6 +834,16 @@ aie2_cmdlist_fill_slot(void *slot, struct amdxdna_gem_obj *cmd_abo,
749834
case ERT_START_NPU:
750835
ret = EXEC_MSG_OPS(xdna)->fill_dpu_slot(cmd_abo, slot, size);
751836
break;
837+
case ERT_START_NPU_PREEMPT:
838+
if (!AIE2_FEATURE_ON(xdna->dev_handle, AIE2_PREEMPT))
839+
return -EOPNOTSUPP;
840+
ret = EXEC_MSG_OPS(xdna)->fill_preempt_slot(cmd_abo, slot, size);
841+
break;
842+
case ERT_START_NPU_PREEMPT_ELF:
843+
if (!AIE2_FEATURE_ON(xdna->dev_handle, AIE2_PREEMPT))
844+
return -EOPNOTSUPP;
845+
ret = EXEC_MSG_OPS(xdna)->fill_elf_slot(cmd_abo, slot, size);
846+
break;
752847
default:
753848
XDNA_INFO(xdna, "Unsupported op %d", op);
754849
ret = -EOPNOTSUPP;

drivers/accel/amdxdna/aie2_msg_priv.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,8 @@ struct exec_dpu_req {
176176
enum exec_npu_type {
177177
EXEC_NPU_TYPE_NON_ELF = 0x1,
178178
EXEC_NPU_TYPE_PARTIAL_ELF = 0x2,
179+
EXEC_NPU_TYPE_PREEMPT = 0x3,
180+
EXEC_NPU_TYPE_ELF = 0x4,
179181
};
180182

181183
union exec_req {
@@ -372,6 +374,7 @@ struct cmd_chain_slot_dpu {
372374
};
373375

374376
#define MAX_NPU_ARGS_SIZE (26 * sizeof(__u32))
377+
#define AIE2_EXEC_BUFFER_KERNEL_OP_TXN 3
375378
struct cmd_chain_slot_npu {
376379
enum exec_npu_type type;
377380
u64 inst_buf_addr;

drivers/accel/amdxdna/aie2_pci.c

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -183,6 +183,10 @@ int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
183183
if (cfg->category != category)
184184
continue;
185185

186+
if (cfg->feature_mask &&
187+
bitmap_subset(&cfg->feature_mask, &ndev->feature_mask, AIE2_FEATURE_MAX))
188+
continue;
189+
186190
value = val ? *val : cfg->value;
187191
ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
188192
if (ret) {
@@ -932,6 +936,25 @@ static int aie2_get_telemetry(struct amdxdna_client *client,
932936
return 0;
933937
}
934938

939+
static int aie2_get_preempt_state(struct amdxdna_client *client,
940+
struct amdxdna_drm_get_info *args)
941+
{
942+
struct amdxdna_drm_attribute_state state = {};
943+
struct amdxdna_dev *xdna = client->xdna;
944+
struct amdxdna_dev_hdl *ndev;
945+
946+
ndev = xdna->dev_handle;
947+
if (args->param == DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE)
948+
state.state = ndev->force_preempt_enabled;
949+
else if (args->param == DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE)
950+
state.state = ndev->frame_boundary_preempt;
951+
952+
if (copy_to_user(u64_to_user_ptr(args->buffer), &state, sizeof(state)))
953+
return -EFAULT;
954+
955+
return 0;
956+
}
957+
935958
static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
936959
{
937960
struct amdxdna_dev *xdna = client->xdna;
@@ -972,6 +995,10 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
972995
case DRM_AMDXDNA_QUERY_RESOURCE_INFO:
973996
ret = aie2_query_resource_info(client, args);
974997
break;
998+
case DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE:
999+
case DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE:
1000+
ret = aie2_get_preempt_state(client, args);
1001+
break;
9751002
default:
9761003
XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
9771004
ret = -EOPNOTSUPP;
@@ -1078,6 +1105,38 @@ static int aie2_set_power_mode(struct amdxdna_client *client,
10781105
return aie2_pm_set_mode(xdna->dev_handle, power_mode);
10791106
}
10801107

1108+
static int aie2_set_preempt_state(struct amdxdna_client *client,
1109+
struct amdxdna_drm_set_state *args)
1110+
{
1111+
struct amdxdna_dev_hdl *ndev = client->xdna->dev_handle;
1112+
struct amdxdna_drm_attribute_state state;
1113+
u32 val;
1114+
int ret;
1115+
1116+
if (copy_from_user(&state, u64_to_user_ptr(args->buffer), sizeof(state)))
1117+
return -EFAULT;
1118+
1119+
if (state.state > 1)
1120+
return -EINVAL;
1121+
1122+
if (XDNA_MBZ_DBG(client->xdna, state.pad, sizeof(state.pad)))
1123+
return -EINVAL;
1124+
1125+
if (args->param == DRM_AMDXDNA_SET_FORCE_PREEMPT) {
1126+
ndev->force_preempt_enabled = state.state;
1127+
} else if (args->param == DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT) {
1128+
val = state.state;
1129+
ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
1130+
&val);
1131+
if (ret)
1132+
return ret;
1133+
1134+
ndev->frame_boundary_preempt = state.state;
1135+
}
1136+
1137+
return 0;
1138+
}
1139+
10811140
static int aie2_set_state(struct amdxdna_client *client,
10821141
struct amdxdna_drm_set_state *args)
10831142
{
@@ -1095,6 +1154,10 @@ static int aie2_set_state(struct amdxdna_client *client,
10951154
case DRM_AMDXDNA_SET_POWER_MODE:
10961155
ret = aie2_set_power_mode(client, args);
10971156
break;
1157+
case DRM_AMDXDNA_SET_FORCE_PREEMPT:
1158+
case DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT:
1159+
ret = aie2_set_preempt_state(client, args);
1160+
break;
10981161
default:
10991162
XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
11001163
ret = -EOPNOTSUPP;

drivers/accel/amdxdna/aie2_pci.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,12 +110,15 @@ struct aie_metadata {
110110
enum rt_config_category {
111111
AIE2_RT_CFG_INIT,
112112
AIE2_RT_CFG_CLK_GATING,
113+
AIE2_RT_CFG_FORCE_PREEMPT,
114+
AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
113115
};
114116

115117
struct rt_config {
116118
u32 type;
117119
u32 value;
118120
u32 category;
121+
unsigned long feature_mask;
119122
};
120123

121124
struct dpm_clk_freq {
@@ -164,6 +167,8 @@ struct aie2_exec_msg_ops {
164167
void (*init_chain_req)(void *req, u64 slot_addr, size_t size, u32 cmd_cnt);
165168
int (*fill_cf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
166169
int (*fill_dpu_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
170+
int (*fill_preempt_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
171+
int (*fill_elf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
167172
u32 (*get_chain_msg_op)(u32 cmd_op);
168173
};
169174

@@ -197,6 +202,8 @@ struct amdxdna_dev_hdl {
197202
u32 hclk_freq;
198203
u32 max_tops;
199204
u32 curr_tops;
205+
u32 force_preempt_enabled;
206+
u32 frame_boundary_preempt;
200207

201208
/* Mailbox and the management channel */
202209
struct mailbox *mbox;
@@ -223,6 +230,7 @@ struct aie2_hw_ops {
223230

224231
enum aie2_fw_feature {
225232
AIE2_NPU_COMMAND,
233+
AIE2_PREEMPT,
226234
AIE2_FEATURE_MAX
227235
};
228236

0 commit comments

Comments
 (0)