Skip to content

Commit 0c36b6a

Browse files
Davidlohr Bueso authored
and djbw committed
cxl/mbox: Add sanitization handling machinery
Sanitization is by definition a device-monopolizing operation, and thus the timeslicing rules for other background commands do not apply. As such handle this special case asynchronously and return immediately. Subsequent changes will allow completion to be pollable from userspace via a sysfs file interface. For devices that don't support interrupts for notifying background command completion, self-poll with the caveat that the poller can be out of sync with the ready hardware, and therefore care must be taken to not allow any new commands to go through until the poller sees the hw completion. The poller takes the mbox_mutex to stabilize the flagging, minimizing any runtime overhead in the send path to check for 'sanitize_tmo' for uncommon poll scenarios. The irq case is much simpler as hardware will serialize/error appropriately. Reviewed-by: Dave Jiang <dave.jiang@intel.com> Signed-off-by: Davidlohr Bueso <dave@stgolabs.net> Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> Link: https://lore.kernel.org/r/20230612181038.14421-4-dave@stgolabs.net Signed-off-by: Dan Williams <dan.j.williams@intel.com>
1 parent 9968c9d commit 0c36b6a

3 files changed

Lines changed: 91 additions & 3 deletions

File tree

drivers/cxl/core/memdev.c

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -460,11 +460,21 @@ void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cm
460460
}
461461
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
462462

463+
static void cxl_memdev_security_shutdown(struct device *dev)
464+
{
465+
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
466+
struct cxl_dev_state *cxlds = cxlmd->cxlds;
467+
468+
if (cxlds->security.poll)
469+
cancel_delayed_work_sync(&cxlds->security.poll_dwork);
470+
}
471+
463472
static void cxl_memdev_shutdown(struct device *dev)
464473
{
465474
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
466475

467476
down_write(&cxl_memdev_rwsem);
477+
cxl_memdev_security_shutdown(dev);
468478
cxlmd->cxlds = NULL;
469479
up_write(&cxl_memdev_rwsem);
470480
}

drivers/cxl/cxlmem.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -264,9 +264,15 @@ struct cxl_poison_state {
264264
* struct cxl_security_state - Device security state
265265
*
266266
* @state: state of last security operation
267+
* @poll: polling for sanitization is enabled, device has no mbox irq support
268+
* @poll_tmo_secs: polling timeout
269+
* @poll_dwork: polling work item
267270
*/
268271
struct cxl_security_state {
	unsigned long state;		/* state of last security operation */
	bool poll;			/* no mbox irq; sanitize completion is self-polled */
	int poll_tmo_secs;		/* current poll backoff; 0 = no poll in flight */
	struct delayed_work poll_dwork;	/* work item that polls for sanitize completion */
};
271277

272278
/**
@@ -379,6 +385,7 @@ enum cxl_opcode {
379385
CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303,
380386
CXL_MBOX_OP_SCAN_MEDIA = 0x4304,
381387
CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305,
388+
CXL_MBOX_OP_SANITIZE = 0x4400,
382389
CXL_MBOX_OP_GET_SECURITY_STATE = 0x4500,
383390
CXL_MBOX_OP_SET_PASSPHRASE = 0x4501,
384391
CXL_MBOX_OP_DISABLE_PASSPHRASE = 0x4502,

drivers/cxl/pci.c

Lines changed: 74 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -115,18 +115,52 @@ static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
115115

116116
static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
117117
{
118+
u64 reg;
119+
u16 opcode;
118120
struct cxl_dev_id *dev_id = id;
119121
struct cxl_dev_state *cxlds = dev_id->cxlds;
120122

121123
if (!cxl_mbox_background_complete(cxlds))
122124
return IRQ_NONE;
123125

124-
/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
125-
rcuwait_wake_up(&cxlds->mbox_wait);
126+
reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
127+
opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
128+
if (opcode == CXL_MBOX_OP_SANITIZE) {
129+
dev_dbg(cxlds->dev, "Sanitization operation ended\n");
130+
} else {
131+
/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
132+
rcuwait_wake_up(&cxlds->mbox_wait);
133+
}
126134

127135
return IRQ_HANDLED;
128136
}
129137

138+
/*
139+
* Sanitization operation polling mode.
140+
*/
141+
static void cxl_mbox_sanitize_work(struct work_struct *work)
142+
{
143+
struct cxl_dev_state *cxlds;
144+
145+
cxlds = container_of(work,
146+
struct cxl_dev_state, security.poll_dwork.work);
147+
148+
mutex_lock(&cxlds->mbox_mutex);
149+
if (cxl_mbox_background_complete(cxlds)) {
150+
cxlds->security.poll_tmo_secs = 0;
151+
put_device(cxlds->dev);
152+
153+
dev_dbg(cxlds->dev, "Sanitization operation ended\n");
154+
} else {
155+
int timeout = cxlds->security.poll_tmo_secs + 10;
156+
157+
cxlds->security.poll_tmo_secs = min(15 * 60, timeout);
158+
queue_delayed_work(system_wq, &cxlds->security.poll_dwork,
159+
timeout * HZ);
160+
}
161+
mutex_unlock(&cxlds->mbox_mutex);
162+
}
163+
130164
/**
131165
* __cxl_pci_mbox_send_cmd() - Execute a mailbox command
132166
* @cxlds: The device state to communicate with.
@@ -187,6 +221,16 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
187221
return -EBUSY;
188222
}
189223

224+
/*
225+
* With sanitize polling, hardware might be done and the poller still
226+
* not be in sync. Ensure no new command comes in until so. Keep the
227+
* hardware semantics and only allow device health status.
228+
*/
229+
if (cxlds->security.poll_tmo_secs > 0) {
230+
if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO)
231+
return -EBUSY;
232+
}
233+
190234
cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
191235
mbox_cmd->opcode);
192236
if (mbox_cmd->size_in) {
@@ -235,11 +279,34 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
235279
*/
236280
if (mbox_cmd->return_code == CXL_MBOX_CMD_RC_BACKGROUND) {
237281
u64 bg_status_reg;
238-
int i, timeout = mbox_cmd->poll_interval_ms;
282+
int i, timeout;
283+
284+
/*
285+
* Sanitization is a special case which monopolizes the device
286+
* and cannot be timesliced. Handle asynchronously instead,
287+
* and allow userspace to poll(2) for completion.
288+
*/
289+
if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
290+
if (cxlds->security.poll_tmo_secs != -1) {
291+
/* hold the device throughout */
292+
get_device(cxlds->dev);
293+
294+
/* give first timeout a second */
295+
timeout = 1;
296+
cxlds->security.poll_tmo_secs = timeout;
297+
queue_delayed_work(system_wq,
298+
&cxlds->security.poll_dwork,
299+
timeout * HZ);
300+
}
301+
302+
dev_dbg(dev, "Sanitization operation started\n");
303+
goto success;
304+
}
239305

240306
dev_dbg(dev, "Mailbox background operation (0x%04x) started\n",
241307
mbox_cmd->opcode);
242308

309+
timeout = mbox_cmd->poll_interval_ms;
243310
for (i = 0; i < mbox_cmd->poll_count; i++) {
244311
if (rcuwait_wait_event_timeout(&cxlds->mbox_wait,
245312
cxl_mbox_background_complete(cxlds),
@@ -270,6 +337,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
270337
return 0; /* completed but caller must check return_code */
271338
}
272339

340+
success:
273341
/* #7 */
274342
cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
275343
out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
@@ -382,6 +450,9 @@ static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
382450
}
383451

384452
mbox_poll:
453+
cxlds->security.poll = true;
454+
INIT_DELAYED_WORK(&cxlds->security.poll_dwork, cxl_mbox_sanitize_work);
455+
385456
dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
386457
return 0;
387458
}

0 commit comments

Comments
 (0)