Skip to content

Commit a762a4c

Browse files
tlendacky authored and gregkh committed
x86/sev: Evict cache lines during SNP memory validation
Commit 7b306df upstream.

An SNP cache coherency vulnerability requires a cache-line eviction mitigation when validating memory after a page state change to private. The specific mitigation is to touch the first and last byte of each 4K page that is being validated. There is no need to perform the mitigation when performing a page state change to shared and rescinding validation.

CPUID Fn8000001F_EBX[31] defines the COHERENCY_SFW_NO bit that, when set, indicates that the software mitigation for this vulnerability is not needed.

Implement the mitigation and invoke it when validating memory (making it private) and the COHERENCY_SFW_NO bit is not set, indicating the SNP guest is vulnerable.

Co-developed-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 580db27 commit a762a4c

4 files changed

Lines changed: 61 additions & 0 deletions

File tree

arch/x86/boot/cpuflags.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -106,5 +106,18 @@ void get_cpuflags(void)
106106
cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
107107
&cpu.flags[1]);
108108
}
109+
110+
if (max_amd_level >= 0x8000001f) {
111+
u32 ebx;
112+
113+
/*
114+
* The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
115+
* the virtualization flags entry (word 8) and set by
116+
* scattered.c, so the bit needs to be explicitly set.
117+
*/
118+
cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
119+
if (ebx & BIT(31))
120+
set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
121+
}
109122
}
110123
}

arch/x86/coco/sev/shared.c

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1254,6 +1254,24 @@ static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svs
12541254
__pval_terminate(pfn, action, page_size, ret, svsm_ret);
12551255
}
12561256

1257+
static inline void sev_evict_cache(void *va, int npages)
1258+
{
1259+
volatile u8 val __always_unused;
1260+
u8 *bytes = va;
1261+
int page_idx;
1262+
1263+
/*
1264+
* For SEV guests, a read from the first/last cache-lines of a 4K page
1265+
* using the guest key is sufficient to cause a flush of all cache-lines
1266+
* associated with that 4K page without incurring all the overhead of a
1267+
* full CLFLUSH sequence.
1268+
*/
1269+
for (page_idx = 0; page_idx < npages; page_idx++) {
1270+
val = bytes[page_idx * PAGE_SIZE];
1271+
val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
1272+
}
1273+
}
1274+
12571275
static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
12581276
{
12591277
struct svsm_pvalidate_call *pc;
@@ -1307,6 +1325,13 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
13071325
if (ret)
13081326
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
13091327
}
1328+
1329+
/*
1330+
* If validating memory (making it private) and affected by the
1331+
* cache-coherency vulnerability, perform the cache eviction mitigation.
1332+
*/
1333+
if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
1334+
sev_evict_cache((void *)vaddr, 1);
13101335
}
13111336

13121337
static void pval_pages(struct snp_psc_desc *desc)
@@ -1491,10 +1516,31 @@ static void svsm_pval_pages(struct snp_psc_desc *desc)
14911516

14921517
static void pvalidate_pages(struct snp_psc_desc *desc)
14931518
{
1519+
struct psc_entry *e;
1520+
unsigned int i;
1521+
14941522
if (snp_vmpl)
14951523
svsm_pval_pages(desc);
14961524
else
14971525
pval_pages(desc);
1526+
1527+
/*
1528+
* If not affected by the cache-coherency vulnerability there is no need
1529+
* to perform the cache eviction mitigation.
1530+
*/
1531+
if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
1532+
return;
1533+
1534+
for (i = 0; i <= desc->hdr.end_entry; i++) {
1535+
e = &desc->entries[i];
1536+
1537+
/*
1538+
* If validating memory (making it private) perform the cache
1539+
* eviction mitigation.
1540+
*/
1541+
if (e->operation == SNP_PAGE_STATE_PRIVATE)
1542+
sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
1543+
}
14981544
}
14991545

15001546
static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)

arch/x86/include/asm/cpufeatures.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -218,6 +218,7 @@
218218
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
219219
#define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */
220220
#define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
221+
#define X86_FEATURE_COHERENCY_SFW_NO ( 8*32+ 4) /* SNP cache coherency software work around not needed */
221222

222223
#define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
223224
#define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */

arch/x86/kernel/cpu/scattered.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ static const struct cpuid_bit cpuid_bits[] = {
4747
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
4848
{ X86_FEATURE_AMD_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
4949
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
50+
{ X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
5051
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
5152
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
5253
{ X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },

0 commit comments

Comments
 (0)