Skip to content

Commit afc3b50

Browse files
ChangSeokBaebp3tk0v
authored and committed
x86/microcode/intel: Implement staging handler
Previously, per-package staging invocations and their associated state data were established. The next step is to implement the actual staging handler according to the specified protocol. Below are key aspects to note: (a) Each staging process must begin by resetting the staging hardware. (b) The staging hardware processes up to a page-sized chunk of the microcode image per iteration, requiring software to submit data incrementally. (c) Once a data chunk is processed, the hardware responds with an offset in the image for the next chunk. (d) The offset may indicate completion or request retransmission of an already transferred chunk. As long as the total transferred data remains within the predefined limit (twice the image size), retransmissions should be acceptable. Incorporate them in the handler, while data transmission and mailbox format handling are implemented separately. [ bp: Sort the headers in a reversed name-length order. ] Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com> Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> Reviewed-by: Tony Luck <tony.luck@intel.com> Tested-by: Anselm Busse <abusse@amazon.de> Link: https://lore.kernel.org/20250320234104.8288-1-chang.seok.bae@intel.com
1 parent 079b90d commit afc3b50

1 file changed

Lines changed: 120 additions & 3 deletions

File tree

  • arch/x86/kernel/cpu/microcode

arch/x86/kernel/cpu/microcode/intel.c

Lines changed: 120 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,11 @@
1616
#include <linux/uaccess.h>
1717
#include <linux/initrd.h>
1818
#include <linux/kernel.h>
19+
#include <linux/delay.h>
1920
#include <linux/slab.h>
2021
#include <linux/cpu.h>
2122
#include <linux/uio.h>
23+
#include <linux/io.h>
2224
#include <linux/mm.h>
2325

2426
#include <asm/cpu_device_id.h>
@@ -33,6 +35,15 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
3335

3436
#define UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL)
3537

38+
/* Defines for the microcode staging mailbox interface */
39+
#define MBOX_REG_NUM 4
40+
#define MBOX_REG_SIZE sizeof(u32)
41+
42+
#define MBOX_CONTROL_OFFSET 0x0
43+
#define MBOX_STATUS_OFFSET 0x4
44+
45+
#define MASK_MBOX_CTRL_ABORT BIT(0)
46+
3647
/* Current microcode patch used in early patching on the APs. */
3748
static struct microcode_intel *ucode_patch_va __read_mostly;
3849
static struct microcode_intel *ucode_patch_late __read_mostly;
@@ -317,13 +328,119 @@ static __init struct microcode_intel *scan_microcode(void *data, size_t size,
317328
}
318329

319330
/*
320-
* Handle the staging process using the mailbox MMIO interface.
331+
* Prepare for a new microcode transfer: reset hardware and record the
332+
* image size.
333+
*/
334+
static void init_stage(struct staging_state *ss)
335+
{
336+
ss->ucode_len = get_totalsize(&ucode_patch_late->hdr);
337+
338+
/*
339+
* Abort any ongoing process, effectively resetting the device.
340+
* Unlike regular mailbox data processing requests, this
341+
* operation does not require a status check.
342+
*/
343+
writel(MASK_MBOX_CTRL_ABORT, ss->mmio_base + MBOX_CONTROL_OFFSET);
344+
}
345+
346+
/*
347+
* Update the chunk size and decide whether another chunk can be sent.
348+
* This accounts for remaining data and retry limits.
349+
*/
350+
static bool can_send_next_chunk(struct staging_state *ss, int *err)
351+
{
352+
/* A page size or remaining bytes if this is the final chunk */
353+
ss->chunk_size = min(PAGE_SIZE, ss->ucode_len - ss->offset);
354+
355+
/*
356+
* Each microcode image is divided into chunks, each at most
357+
* one page size. A 10-chunk image would typically require 10
358+
* transactions.
359+
*
360+
* However, the hardware managing the mailbox has limited
361+
* resources and may not cache the entire image, potentially
362+
* requesting the same chunk multiple times.
363+
*
364+
* To tolerate this behavior, allow up to twice the expected
365+
* number of transactions (i.e., a 10-chunk image can take up to
366+
* 20 attempts).
367+
*
368+
* If the number of attempts exceeds this limit, treat it as
369+
* exceeding the maximum allowed transfer size.
370+
*/
371+
if (ss->bytes_sent + ss->chunk_size > ss->ucode_len * 2) {
372+
*err = -EMSGSIZE;
373+
return false;
374+
}
375+
376+
*err = 0;
377+
return true;
378+
}
379+
380+
/*
381+
* Determine whether staging is complete: either the hardware signaled
382+
* the end offset, or no more transactions are permitted (retry limit
383+
* reached).
384+
*/
385+
static inline bool staging_is_complete(struct staging_state *ss, int *err)
386+
{
387+
return (ss->offset == UINT_MAX) || !can_send_next_chunk(ss, err);
388+
}
389+
390+
/*
391+
* Transmit a chunk of the microcode image to the hardware.
392+
* Return 0 on success, or an error code on failure.
393+
*/
394+
static int send_data_chunk(struct staging_state *ss, void *ucode_ptr __maybe_unused)
395+
{
396+
pr_debug_once("Staging mailbox loading code needs to be implemented.\n");
397+
return -EPROTONOSUPPORT;
398+
}
399+
400+
/*
401+
* Retrieve the next offset from the hardware response.
402+
* Return 0 on success, or an error code on failure.
403+
*/
404+
static int fetch_next_offset(struct staging_state *ss)
405+
{
406+
pr_debug_once("Staging mailbox response handling code needs to be implemented.\n");
407+
return -EPROTONOSUPPORT;
408+
}
409+
410+
/*
411+
* Handle the staging process using the mailbox MMIO interface. The
412+
* microcode image is transferred in chunks until completion.
321413
* Return 0 on success or an error code on failure.
322414
*/
323415
static int do_stage(u64 mmio_pa)
324416
{
325-
pr_debug_once("Staging implementation is pending.\n");
326-
return -EPROTONOSUPPORT;
417+
struct staging_state ss = {};
418+
int err;
419+
420+
ss.mmio_base = ioremap(mmio_pa, MBOX_REG_NUM * MBOX_REG_SIZE);
421+
if (WARN_ON_ONCE(!ss.mmio_base))
422+
return -EADDRNOTAVAIL;
423+
424+
init_stage(&ss);
425+
426+
/* Perform the staging process while within the retry limit */
427+
while (!staging_is_complete(&ss, &err)) {
428+
/* Send a chunk of microcode each time: */
429+
err = send_data_chunk(&ss, ucode_patch_late);
430+
if (err)
431+
break;
432+
/*
433+
* Then, ask the hardware which piece of the image it
434+
* needs next. The same piece may be sent more than once.
435+
*/
436+
err = fetch_next_offset(&ss);
437+
if (err)
438+
break;
439+
}
440+
441+
iounmap(ss.mmio_base);
442+
443+
return err;
327444
}
328445

329446
static void stage_microcode(void)

0 commit comments

Comments
 (0)