Skip to content

Commit b3fdf93

Browse files
jchu314atgithubdjbw
authored and committed
x86/mce: relocate set{clear}_mce_nospec() functions
Relocate the twin mce functions to arch/x86/mm/pat/set_memory.c file where they belong. While at it, fixup a function name in a comment. Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Jane Chu <jane.chu@oracle.com> Acked-by: Borislav Petkov <bp@suse.de> Cc: Stephen Rothwell <sfr@canb.auug.org.au> [sfr: gate {set,clear}_mce_nospec() by CONFIG_X86_64] Link: https://lore.kernel.org/r/165272527328.90175.8336008202048685278.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams <dan.j.williams@intel.com>
1 parent 7917f9c commit b3fdf93

3 files changed

Lines changed: 52 additions & 58 deletions

File tree

arch/x86/include/asm/set_memory.h

Lines changed: 0 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -86,56 +86,4 @@ bool kernel_page_present(struct page *page);
8686

8787
extern int kernel_set_to_readonly;
8888

89-
#ifdef CONFIG_X86_64
90-
/*
91-
* Prevent speculative access to the page by either unmapping
92-
* it (if we do not require access to any part of the page) or
93-
* marking it uncacheable (if we want to try to retrieve data
94-
* from non-poisoned lines in the page).
95-
*/
96-
static inline int set_mce_nospec(unsigned long pfn, bool unmap)
97-
{
98-
unsigned long decoy_addr;
99-
int rc;
100-
101-
/* SGX pages are not in the 1:1 map */
102-
if (arch_is_platform_page(pfn << PAGE_SHIFT))
103-
return 0;
104-
/*
105-
* We would like to just call:
106-
* set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
107-
* but doing that would radically increase the odds of a
108-
* speculative access to the poison page because we'd have
109-
* the virtual address of the kernel 1:1 mapping sitting
110-
* around in registers.
111-
* Instead we get tricky. We create a non-canonical address
112-
* that looks just like the one we want, but has bit 63 flipped.
113-
* This relies on set_memory_XX() properly sanitizing any __pa()
114-
* results with __PHYSICAL_MASK or PTE_PFN_MASK.
115-
*/
116-
decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
117-
118-
if (unmap)
119-
rc = set_memory_np(decoy_addr, 1);
120-
else
121-
rc = set_memory_uc(decoy_addr, 1);
122-
if (rc)
123-
pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
124-
return rc;
125-
}
126-
#define set_mce_nospec set_mce_nospec
127-
128-
/* Restore full speculative operation to the pfn. */
static inline int clear_mce_nospec(unsigned long pfn)
{
	unsigned long kaddr = (unsigned long)pfn_to_kaddr(pfn);

	return set_memory_wb(kaddr, 1);
}
#define clear_mce_nospec clear_mce_nospec
134-
#else
135-
/*
136-
* Few people would run a 32-bit kernel on a machine that supports
137-
* recoverable errors because they have too much memory to boot 32-bit.
138-
*/
139-
#endif
140-
14189
#endif /* _ASM_X86_SET_MEMORY_H */

arch/x86/mm/pat/set_memory.c

Lines changed: 48 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
#include <linux/vmstat.h>
2020
#include <linux/kernel.h>
2121
#include <linux/cc_platform.h>
22+
#include <linux/set_memory.h>
2223

2324
#include <asm/e820/api.h>
2425
#include <asm/processor.h>
@@ -29,7 +30,6 @@
2930
#include <asm/pgalloc.h>
3031
#include <asm/proto.h>
3132
#include <asm/memtype.h>
32-
#include <asm/set_memory.h>
3333
#include <asm/hyperv-tlfs.h>
3434
#include <asm/mshyperv.h>
3535

@@ -1816,7 +1816,7 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
18161816
}
18171817

18181818
/*
1819-
* _set_memory_prot is an internal helper for callers that have been passed
1819+
* __set_memory_prot is an internal helper for callers that have been passed
18201820
* a pgprot_t value from upper layers and a reservation has already been taken.
18211821
* If you want to set the pgprot to a specific page protocol, use the
18221822
* set_memory_xx() functions.
@@ -1925,6 +1925,52 @@ int set_memory_wb(unsigned long addr, int numpages)
19251925
}
19261926
EXPORT_SYMBOL(set_memory_wb);
19271927

1928+
/*
 * Prevent speculative access to the page by either unmapping
 * it (if we do not require access to any part of the page) or
 * marking it uncacheable (if we want to try to retrieve data
 * from non-poisoned lines in the page).
 */
#ifdef CONFIG_X86_64
int set_mce_nospec(unsigned long pfn, bool unmap)
{
	unsigned long decoy;
	int rc;

	/* SGX pages are not in the 1:1 map */
	if (arch_is_platform_page(pfn << PAGE_SHIFT))
		return 0;

	/*
	 * Calling set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1)
	 * directly would leave the kernel 1:1 virtual address of the
	 * poison page sitting in registers, sharply raising the odds
	 * of a speculative access to it.  Instead, build a
	 * non-canonical alias of that address with bit 63 flipped;
	 * set_memory_XX() sanitizes any __pa() results with
	 * __PHYSICAL_MASK or PTE_PFN_MASK, so the intended page is
	 * still the one updated.
	 */
	decoy = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	rc = unmap ? set_memory_np(decoy, 1) : set_memory_uc(decoy, 1);
	if (rc)
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
	return rc;
}
1965+
1966+
/* Restore full speculative operation to the pfn. */
int clear_mce_nospec(unsigned long pfn)
{
	unsigned long kaddr = (unsigned long)pfn_to_kaddr(pfn);

	return set_memory_wb(kaddr, 1);
}
EXPORT_SYMBOL_GPL(clear_mce_nospec);
#endif /* CONFIG_X86_64 */
1973+
19281974
int set_memory_x(unsigned long addr, int numpages)
19291975
{
19301976
if (!(__supported_pte_mask & _PAGE_NX))

include/linux/set_memory.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,14 +42,14 @@ static inline bool can_set_direct_map(void)
4242
#endif
4343
#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
4444

45-
#ifndef set_mce_nospec
45+
#ifdef CONFIG_X86_64
46+
int set_mce_nospec(unsigned long pfn, bool unmap);
47+
int clear_mce_nospec(unsigned long pfn);
48+
#else
4649
/* Stub: no poison-recovery support on this configuration. */
static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
	return 0;
}
50-
#endif
51-
52-
#ifndef clear_mce_nospec
5353
static inline int clear_mce_nospec(unsigned long pfn)
5454
{
5555
return 0;

0 commit comments

Comments
 (0)