Skip to content

Commit 22bdd6e

Browse files
committed
Merge tag 'x86_apic_for_v6.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 SEV and apic updates from Borislav Petkov: - Add functionality to provide runtime firmware updates for the non-x86 parts of an AMD platform like the security processor (ASP) firmware, modules etc, for example. The intent being that these updates are interim, live fixups before a proper BIOS update can be attempted - Add guest support for AMD's Secure AVIC feature which gives encrypted guests the needed protection against a malicious hypervisor generating unexpected interrupts and injecting them into such guest, thus interfering with its operation in an unexpected and negative manner. The advantage of this scheme is that the guest determines which interrupts and when to accept them vs leaving that to the benevolence (or not) of the hypervisor - Strictly separate the startup code from the rest of the kernel where the former is executed from the initial 1:1 mapping of memory. The problem was that the toolchain-generated version of the code was being executed from a different mapping of memory than what was "assumed" during code generation, needing an ever-growing pile of fixups for absolute memory references which are invalid in the early, 1:1 memory mapping during boot. The major advantage of this is that there's no need to check the 1:1 mapping portion of the code for absolute relocations anymore, and the RIP_REL_REF() macro sprinkling all over the place can be removed. 
For more info, see Ard's very detailed writeup on this [1] - The usual cleanups and fixes Link: https://lore.kernel.org/r/CAMj1kXEzKEuePEiHB%2BHxvfQbFz0sTiHdn4B%2B%2BzVBJ2mhkPkQ4Q@mail.gmail.com [1] * tag 'x86_apic_for_v6.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (49 commits) x86/boot: Drop erroneous __init annotation from early_set_pages_state() crypto: ccp - Add AMD Seamless Firmware Servicing (SFS) driver crypto: ccp - Add new HV-Fixed page allocation/free API x86/sev: Add new dump_rmp parameter to snp_leak_pages() API x86/startup/sev: Document the CPUID flow in the boot #VC handler objtool: Ignore __pi___cfi_ prefixed symbols x86/sev: Zap snp_abort() x86/apic/savic: Do not use snp_abort() x86/boot: Get rid of the .head.text section x86/boot: Move startup code out of __head section efistub/x86: Remap inittext read-execute when needed x86/boot: Create a confined code area for startup code x86/kbuild: Incorporate boot/startup/ via Kbuild makefile x86/boot: Revert "Reject absolute references in .head.text" x86/boot: Check startup code for absence of absolute relocations objtool: Add action to check for absence of absolute relocations x86/sev: Export startup routines for later use x86/sev: Move __sev_[get|put]_ghcb() into separate noinstr object x86/sev: Provide PIC aliases for SEV related data objects x86/boot: Provide PIC aliases for 5-level paging related constants ...
2 parents 2cb8eea + 1f6113a commit 22bdd6e

61 files changed

Lines changed: 2042 additions & 708 deletions

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

arch/x86/Kbuild

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,8 @@
33
# Branch profiling isn't noinstr-safe. Disable it for arch/x86/*
44
subdir-ccflags-$(CONFIG_TRACE_BRANCH_PROFILING) += -DDISABLE_BRANCH_PROFILING
55

6+
obj-y += boot/startup/
7+
68
obj-$(CONFIG_ARCH_HAS_CC_PLATFORM) += coco/
79

810
obj-y += entry/

arch/x86/Kconfig

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -487,6 +487,19 @@ config X86_X2APIC
487487

488488
If in doubt, say Y.
489489

490+
config AMD_SECURE_AVIC
491+
bool "AMD Secure AVIC"
492+
depends on AMD_MEM_ENCRYPT && X86_X2APIC
493+
help
494+
Enable this to get AMD Secure AVIC support on guests that have this feature.
495+
496+
AMD Secure AVIC provides hardware acceleration for performance sensitive
497+
APIC accesses and support for managing guest owned APIC state for SEV-SNP
498+
guests. Secure AVIC does not support xAPIC mode. It has functional
499+
dependency on x2apic being enabled in the guest.
500+
501+
If you don't know what to do here, say N.
502+
490503
config X86_POSTED_MSI
491504
bool "Enable MSI and MSI-x delivery by posted interrupts"
492505
depends on X86_64 && IRQ_REMAP

arch/x86/Makefile

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -275,7 +275,6 @@ archprepare: $(cpufeaturemasks.hdr)
275275
###
276276
# Kernel objects
277277

278-
core-y += arch/x86/boot/startup/
279278
libs-y += arch/x86/lib/
280279

281280
# drivers-y are linked after core-y

arch/x86/boot/compressed/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ LDFLAGS_vmlinux += -T
7373
hostprogs := mkpiggy
7474
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
7575

76-
sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABbCDGRSTtVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
76+
sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABbCDGRSTtVW] \(_text\|__start_rodata\|_sinittext\|__inittext_end\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
7777

7878
quiet_cmd_voffset = VOFFSET $@
7979
cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@

arch/x86/boot/compressed/misc.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -332,6 +332,8 @@ static size_t parse_elf(void *output)
332332
}
333333

334334
const unsigned long kernel_text_size = VO___start_rodata - VO__text;
335+
const unsigned long kernel_inittext_offset = VO__sinittext - VO__text;
336+
const unsigned long kernel_inittext_size = VO___inittext_end - VO__sinittext;
335337
const unsigned long kernel_total_size = VO__end - VO__text;
336338

337339
static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);

arch/x86/boot/compressed/sev-handle-vc.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
// SPDX-License-Identifier: GPL-2.0
22

33
#include "misc.h"
4+
#include "error.h"
45
#include "sev.h"
56

67
#include <linux/kernel.h>
@@ -14,6 +15,8 @@
1415
#include <asm/fpu/xcr.h>
1516

1617
#define __BOOT_COMPRESSED
18+
#undef __init
19+
#define __init
1720

1821
/* Basic instruction decoding support needed */
1922
#include "../../lib/inat.c"

arch/x86/boot/compressed/sev.c

Lines changed: 39 additions & 93 deletions
Original file line numberDiff line numberDiff line change
@@ -32,102 +32,47 @@ struct ghcb *boot_ghcb;
3232
#undef __init
3333
#define __init
3434

35-
#undef __head
36-
#define __head
37-
3835
#define __BOOT_COMPRESSED
3936

40-
extern struct svsm_ca *boot_svsm_caa;
41-
extern u64 boot_svsm_caa_pa;
42-
43-
struct svsm_ca *svsm_get_caa(void)
44-
{
45-
return boot_svsm_caa;
46-
}
47-
48-
u64 svsm_get_caa_pa(void)
49-
{
50-
return boot_svsm_caa_pa;
51-
}
52-
53-
int svsm_perform_call_protocol(struct svsm_call *call);
54-
5537
u8 snp_vmpl;
38+
u16 ghcb_version;
39+
40+
u64 boot_svsm_caa_pa;
5641

5742
/* Include code for early handlers */
5843
#include "../../boot/startup/sev-shared.c"
5944

60-
int svsm_perform_call_protocol(struct svsm_call *call)
61-
{
62-
struct ghcb *ghcb;
63-
int ret;
64-
65-
if (boot_ghcb)
66-
ghcb = boot_ghcb;
67-
else
68-
ghcb = NULL;
69-
70-
do {
71-
ret = ghcb ? svsm_perform_ghcb_protocol(ghcb, call)
72-
: svsm_perform_msr_protocol(call);
73-
} while (ret == -EAGAIN);
74-
75-
return ret;
76-
}
77-
7845
static bool sev_snp_enabled(void)
7946
{
8047
return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
8148
}
8249

83-
static void __page_state_change(unsigned long paddr, enum psc_op op)
84-
{
85-
u64 val, msr;
86-
87-
/*
88-
* If private -> shared then invalidate the page before requesting the
89-
* state change in the RMP table.
90-
*/
91-
if (op == SNP_PAGE_STATE_SHARED)
92-
pvalidate_4k_page(paddr, paddr, false);
93-
94-
/* Save the current GHCB MSR value */
95-
msr = sev_es_rd_ghcb_msr();
96-
97-
/* Issue VMGEXIT to change the page state in RMP table. */
98-
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
99-
VMGEXIT();
100-
101-
/* Read the response of the VMGEXIT. */
102-
val = sev_es_rd_ghcb_msr();
103-
if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
104-
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
105-
106-
/* Restore the GHCB MSR value */
107-
sev_es_wr_ghcb_msr(msr);
108-
109-
/*
110-
* Now that page state is changed in the RMP table, validate it so that it is
111-
* consistent with the RMP entry.
112-
*/
113-
if (op == SNP_PAGE_STATE_PRIVATE)
114-
pvalidate_4k_page(paddr, paddr, true);
115-
}
116-
11750
void snp_set_page_private(unsigned long paddr)
11851
{
52+
struct psc_desc d = {
53+
SNP_PAGE_STATE_PRIVATE,
54+
(struct svsm_ca *)boot_svsm_caa_pa,
55+
boot_svsm_caa_pa
56+
};
57+
11958
if (!sev_snp_enabled())
12059
return;
12160

122-
__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
61+
__page_state_change(paddr, paddr, &d);
12362
}
12463

12564
void snp_set_page_shared(unsigned long paddr)
12665
{
66+
struct psc_desc d = {
67+
SNP_PAGE_STATE_SHARED,
68+
(struct svsm_ca *)boot_svsm_caa_pa,
69+
boot_svsm_caa_pa
70+
};
71+
12772
if (!sev_snp_enabled())
12873
return;
12974

130-
__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
75+
__page_state_change(paddr, paddr, &d);
13176
}
13277

13378
bool early_setup_ghcb(void)
@@ -152,8 +97,14 @@ bool early_setup_ghcb(void)
15297

15398
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
15499
{
100+
struct psc_desc d = {
101+
SNP_PAGE_STATE_PRIVATE,
102+
(struct svsm_ca *)boot_svsm_caa_pa,
103+
boot_svsm_caa_pa
104+
};
105+
155106
for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
156-
__page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
107+
__page_state_change(pa, pa, &d);
157108
}
158109

159110
void sev_es_shutdown_ghcb(void)
@@ -235,15 +186,23 @@ bool sev_es_check_ghcb_fault(unsigned long address)
235186
MSR_AMD64_SNP_VMSA_REG_PROT | \
236187
MSR_AMD64_SNP_RESERVED_BIT13 | \
237188
MSR_AMD64_SNP_RESERVED_BIT15 | \
189+
MSR_AMD64_SNP_SECURE_AVIC | \
238190
MSR_AMD64_SNP_RESERVED_MASK)
239191

192+
#ifdef CONFIG_AMD_SECURE_AVIC
193+
#define SNP_FEATURE_SECURE_AVIC MSR_AMD64_SNP_SECURE_AVIC
194+
#else
195+
#define SNP_FEATURE_SECURE_AVIC 0
196+
#endif
197+
240198
/*
241199
* SNP_FEATURES_PRESENT is the mask of SNP features that are implemented
242200
* by the guest kernel. As and when a new feature is implemented in the
243201
* guest kernel, a corresponding bit should be added to the mask.
244202
*/
245203
#define SNP_FEATURES_PRESENT (MSR_AMD64_SNP_DEBUG_SWAP | \
246-
MSR_AMD64_SNP_SECURE_TSC)
204+
MSR_AMD64_SNP_SECURE_TSC | \
205+
SNP_FEATURE_SECURE_AVIC)
247206

248207
u64 snp_get_unsupported_features(u64 status)
249208
{
@@ -347,7 +306,7 @@ static bool early_snp_init(struct boot_params *bp)
347306
* running at VMPL0. The CA will be used to communicate with the
348307
* SVSM and request its services.
349308
*/
350-
svsm_setup_ca(cc_info);
309+
svsm_setup_ca(cc_info, rip_rel_ptr(&boot_ghcb_page));
351310

352311
/*
353312
* Pass run-time kernel a pointer to CC info via boot_params so EFI
@@ -391,6 +350,8 @@ static int sev_check_cpu_support(void)
391350
if (!(eax & BIT(1)))
392351
return -ENODEV;
393352

353+
sev_snp_needs_sfw = !(ebx & BIT(31));
354+
394355
return ebx & 0x3f;
395356
}
396357

@@ -453,30 +414,16 @@ void sev_enable(struct boot_params *bp)
453414
*/
454415
if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
455416
u64 hv_features;
456-
int ret;
457417

458418
hv_features = get_hv_features();
459419
if (!(hv_features & GHCB_HV_FT_SNP))
460420
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
461421

462422
/*
463-
* Enforce running at VMPL0 or with an SVSM.
464-
*
465-
* Use RMPADJUST (see the rmpadjust() function for a description of
466-
* what the instruction does) to update the VMPL1 permissions of a
467-
* page. If the guest is running at VMPL0, this will succeed. If the
468-
* guest is running at any other VMPL, this will fail. Linux SNP guests
469-
* only ever run at a single VMPL level so permission mask changes of a
470-
* lesser-privileged VMPL are a don't-care.
471-
*/
472-
ret = rmpadjust((unsigned long)&boot_ghcb_page, RMP_PG_SIZE_4K, 1);
473-
474-
/*
475-
* Running at VMPL0 is not required if an SVSM is present and the hypervisor
476-
* supports the required SVSM GHCB events.
423+
* Running at VMPL0 is required unless an SVSM is present and
424+
* the hypervisor supports the required SVSM GHCB events.
477425
*/
478-
if (ret &&
479-
!(snp_vmpl && (hv_features & GHCB_HV_FT_SNP_MULTI_VMPL)))
426+
if (snp_vmpl && !(hv_features & GHCB_HV_FT_SNP_MULTI_VMPL))
480427
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
481428
}
482429

@@ -550,7 +497,6 @@ bool early_is_sevsnp_guest(void)
550497

551498
/* Obtain the address of the calling area to use */
552499
boot_rdmsr(MSR_SVSM_CAA, &m);
553-
boot_svsm_caa = (void *)m.q;
554500
boot_svsm_caa_pa = m.q;
555501

556502
/*

arch/x86/boot/cpuflags.c

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -106,18 +106,5 @@ void get_cpuflags(void)
106106
cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
107107
&cpu.flags[1]);
108108
}
109-
110-
if (max_amd_level >= 0x8000001f) {
111-
u32 ebx;
112-
113-
/*
114-
* The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
115-
* the virtualization flags entry (word 8) and set by
116-
* scattered.c, so the bit needs to be explicitly set.
117-
*/
118-
cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
119-
if (ebx & BIT(31))
120-
set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
121-
}
122109
}
123110
}

arch/x86/boot/startup/Makefile

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ KBUILD_AFLAGS += -D__DISABLE_EXPORTS
44
KBUILD_CFLAGS += -D__DISABLE_EXPORTS -mcmodel=small -fPIC \
55
-Os -DDISABLE_BRANCH_PROFILING \
66
$(DISABLE_STACKLEAK_PLUGIN) \
7+
$(DISABLE_LATENT_ENTROPY_PLUGIN) \
78
-fno-stack-protector -D__NO_FORTIFY \
89
-fno-jump-tables \
910
-include $(srctree)/include/linux/hidden.h
@@ -19,6 +20,7 @@ KCOV_INSTRUMENT := n
1920

2021
obj-$(CONFIG_X86_64) += gdt_idt.o map_kernel.o
2122
obj-$(CONFIG_AMD_MEM_ENCRYPT) += sme.o sev-startup.o
23+
pi-objs := $(patsubst %.o,$(obj)/%.o,$(obj-y))
2224

2325
lib-$(CONFIG_X86_64) += la57toggle.o
2426
lib-$(CONFIG_EFI_MIXED) += efi-mixed.o
@@ -28,3 +30,23 @@ lib-$(CONFIG_EFI_MIXED) += efi-mixed.o
2830
# to be linked into the decompressor or the EFI stub but not vmlinux
2931
#
3032
$(patsubst %.o,$(obj)/%.o,$(lib-y)): OBJECT_FILES_NON_STANDARD := y
33+
34+
#
35+
# Invoke objtool for each object individually to check for absolute
36+
# relocations, even if other objtool actions are being deferred.
37+
#
38+
$(pi-objs): objtool-enabled = 1
39+
$(pi-objs): objtool-args = $(if $(delay-objtool),,$(objtool-args-y)) --noabs
40+
41+
#
42+
# Confine the startup code by prefixing all symbols with __pi_ (for position
43+
# independent). This ensures that startup code can only call other startup
44+
# code, or code that has explicitly been made accessible to it via a symbol
45+
# alias.
46+
#
47+
$(obj)/%.pi.o: OBJCOPYFLAGS := --prefix-symbols=__pi_
48+
$(obj)/%.pi.o: $(obj)/%.o FORCE
49+
$(call if_changed,objcopy)
50+
51+
targets += $(obj-y)
52+
obj-y := $(patsubst %.o,%.pi.o,$(obj-y))

arch/x86/boot/startup/exports.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
2+
/*
3+
* The symbols below are functions that are implemented by the startup code,
4+
* but called at runtime by the SEV code residing in the core kernel.
5+
*/
6+
PROVIDE(early_set_pages_state = __pi_early_set_pages_state);
7+
PROVIDE(early_snp_set_memory_private = __pi_early_snp_set_memory_private);
8+
PROVIDE(early_snp_set_memory_shared = __pi_early_snp_set_memory_shared);
9+
PROVIDE(get_hv_features = __pi_get_hv_features);
10+
PROVIDE(sev_es_terminate = __pi_sev_es_terminate);
11+
PROVIDE(snp_cpuid = __pi_snp_cpuid);
12+
PROVIDE(snp_cpuid_get_table = __pi_snp_cpuid_get_table);
13+
PROVIDE(svsm_issue_call = __pi_svsm_issue_call);
14+
PROVIDE(svsm_process_result_codes = __pi_svsm_process_result_codes);

0 commit comments

Comments
 (0)