|
| 1 | +// SPDX-License-Identifier: MIT |
| 2 | +/* |
| 3 | + * Copyright © 2025 Intel Corporation |
| 4 | + */ |
| 5 | + |
| 6 | +#include "abi/guc_actions_abi.h" |
| 7 | +#include "xe_guc.h" |
| 8 | +#include "xe_guc_ct.h" |
| 9 | +#include "xe_guc_pagefault.h" |
| 10 | +#include "xe_pagefault.h" |
| 11 | + |
| 12 | +static void guc_ack_fault(struct xe_pagefault *pf, int err) |
| 13 | +{ |
| 14 | + u32 vfid = FIELD_GET(PFD_VFID, pf->producer.msg[2]); |
| 15 | + u32 engine_instance = FIELD_GET(PFD_ENG_INSTANCE, pf->producer.msg[0]); |
| 16 | + u32 engine_class = FIELD_GET(PFD_ENG_CLASS, pf->producer.msg[0]); |
| 17 | + u32 pdata = FIELD_GET(PFD_PDATA_LO, pf->producer.msg[0]) | |
| 18 | + (FIELD_GET(PFD_PDATA_HI, pf->producer.msg[1]) << |
| 19 | + PFD_PDATA_HI_SHIFT); |
| 20 | + u32 action[] = { |
| 21 | + XE_GUC_ACTION_PAGE_FAULT_RES_DESC, |
| 22 | + |
| 23 | + FIELD_PREP(PFR_VALID, 1) | |
| 24 | + FIELD_PREP(PFR_SUCCESS, !!err) | |
| 25 | + FIELD_PREP(PFR_REPLY, PFR_ACCESS) | |
| 26 | + FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) | |
| 27 | + FIELD_PREP(PFR_ASID, pf->consumer.asid), |
| 28 | + |
| 29 | + FIELD_PREP(PFR_VFID, vfid) | |
| 30 | + FIELD_PREP(PFR_ENG_INSTANCE, engine_instance) | |
| 31 | + FIELD_PREP(PFR_ENG_CLASS, engine_class) | |
| 32 | + FIELD_PREP(PFR_PDATA, pdata), |
| 33 | + }; |
| 34 | + struct xe_guc *guc = pf->producer.private; |
| 35 | + |
| 36 | + xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0); |
| 37 | +} |
| 38 | + |
/* Producer-side hooks the Xe page fault layer uses to call back into the GuC */
static const struct xe_pagefault_ops guc_pagefault_ops = {
	.ack_fault = guc_ack_fault,
};
| 42 | + |
| 43 | +/** |
| 44 | + * xe_guc_pagefault_handler() - G2H page fault handler |
| 45 | + * @guc: GuC object |
| 46 | + * @msg: G2H message |
| 47 | + * @len: Length of G2H message |
| 48 | + * |
| 49 | + * Parse GuC to host (G2H) message into a struct xe_pagefault and forward onto |
| 50 | + * the Xe page fault layer. |
| 51 | + * |
| 52 | + * Return: 0 on success, errno on failure |
| 53 | + */ |
| 54 | +int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len) |
| 55 | +{ |
| 56 | + struct xe_pagefault pf; |
| 57 | + int i; |
| 58 | + |
| 59 | +#define GUC_PF_MSG_LEN_DW \ |
| 60 | + (sizeof(struct xe_guc_pagefault_desc) / sizeof(u32)) |
| 61 | + |
| 62 | + BUILD_BUG_ON(GUC_PF_MSG_LEN_DW > XE_PAGEFAULT_PRODUCER_MSG_LEN_DW); |
| 63 | + |
| 64 | + if (len != GUC_PF_MSG_LEN_DW) |
| 65 | + return -EPROTO; |
| 66 | + |
| 67 | + pf.gt = guc_to_gt(guc); |
| 68 | + |
| 69 | + /* |
| 70 | + * XXX: These values happen to match the enum in xe_pagefault_types.h. |
| 71 | + * If that changes, we’ll need to remap them here. |
| 72 | + */ |
| 73 | + pf.consumer.page_addr = ((u64)FIELD_GET(PFD_VIRTUAL_ADDR_HI, msg[3]) |
| 74 | + << PFD_VIRTUAL_ADDR_HI_SHIFT) | |
| 75 | + (FIELD_GET(PFD_VIRTUAL_ADDR_LO, msg[2]) << |
| 76 | + PFD_VIRTUAL_ADDR_LO_SHIFT); |
| 77 | + pf.consumer.asid = FIELD_GET(PFD_ASID, msg[1]); |
| 78 | + pf.consumer.access_type = FIELD_GET(PFD_ACCESS_TYPE, msg[2]); |
| 79 | + pf.consumer.fault_type = FIELD_GET(PFD_FAULT_TYPE, msg[2]); |
| 80 | + if (FIELD_GET(XE2_PFD_TRVA_FAULT, msg[0])) |
| 81 | + pf.consumer.fault_level = XE_PAGEFAULT_LEVEL_NACK; |
| 82 | + else |
| 83 | + pf.consumer.fault_level = FIELD_GET(PFD_FAULT_LEVEL, msg[0]); |
| 84 | + pf.consumer.engine_class = FIELD_GET(PFD_ENG_CLASS, msg[0]); |
| 85 | + pf.consumer.engine_instance = FIELD_GET(PFD_ENG_INSTANCE, msg[0]); |
| 86 | + |
| 87 | + pf.producer.private = guc; |
| 88 | + pf.producer.ops = &guc_pagefault_ops; |
| 89 | + for (i = 0; i < GUC_PF_MSG_LEN_DW; ++i) |
| 90 | + pf.producer.msg[i] = msg[i]; |
| 91 | + |
| 92 | +#undef GUC_PF_MSG_LEN_DW |
| 93 | + |
| 94 | + return xe_pagefault_handler(guc_to_xe(guc), &pf); |
| 95 | +} |
0 commit comments