@@ -19,6 +19,7 @@
 #include <linux/vmstat.h>
 #include <linux/kernel.h>
 #include <linux/cc_platform.h>
+#include <linux/set_memory.h>
 
 #include <asm/e820/api.h>
 #include <asm/processor.h>
@@ -29,7 +30,6 @@
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
 #include <asm/memtype.h>
-#include <asm/set_memory.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
 
@@ -1805,7 +1805,7 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
 }
 
 /*
- * _set_memory_prot is an internal helper for callers that have been passed
+ * __set_memory_prot is an internal helper for callers that have been passed
  * a pgprot_t value from upper layers and a reservation has already been taken.
  * If you want to set the pgprot to a specific page protocol, use the
  * set_memory_xx() functions.
@@ -1914,6 +1914,51 @@ int set_memory_wb(unsigned long addr, int numpages)
 }
 EXPORT_SYMBOL(set_memory_wb);
 
+/* Prevent speculative access to a page by marking it not-present */
+#ifdef CONFIG_X86_64
+int set_mce_nospec(unsigned long pfn)
+{
+        unsigned long decoy_addr;
+        int rc;
+
+        /* SGX pages are not in the 1:1 map */
+        if (arch_is_platform_page(pfn << PAGE_SHIFT))
+                return 0;
+        /*
+         * We would like to just call:
+         *      set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
+         * but doing that would radically increase the odds of a
+         * speculative access to the poison page because we'd have
+         * the virtual address of the kernel 1:1 mapping sitting
+         * around in registers.
+         * Instead we get tricky.  We create a non-canonical address
+         * that looks just like the one we want, but has bit 63 flipped.
+         * This relies on set_memory_XX() properly sanitizing any __pa()
+         * results with __PHYSICAL_MASK or PTE_PFN_MASK.
+         */
+        decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
+
+        rc = set_memory_np(decoy_addr, 1);
+        if (rc)
+                pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
+        return rc;
+}
+
+static int set_memory_present(unsigned long *addr, int numpages)
+{
+        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_PRESENT), 0);
+}
+
+/* Restore full speculative operation to the pfn. */
+int clear_mce_nospec(unsigned long pfn)
+{
+        unsigned long addr = (unsigned long) pfn_to_kaddr(pfn);
+
+        return set_memory_present(&addr, 1);
+}
+EXPORT_SYMBOL_GPL(clear_mce_nospec);
+#endif /* CONFIG_X86_64 */
+
 int set_memory_x(unsigned long addr, int numpages)
 {
         if (!(__supported_pte_mask & _PAGE_NX))
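
The block comment in set_mce_nospec() carries the one subtle idea in this
patch: instead of passing the real 1:1-map virtual address of the poisoned
page to set_memory_np(), we pass a decoy with bit 63 flipped, so the usable
canonical address never sits in a register where a speculative load could
pick it up. This only works because the CPA code masks __pa() results down
to the real physical bits. A minimal userspace sketch of that arithmetic,
assuming the usual 4-level-paging constants with no KASLR (the kernel's
real values come from arch/x86/include/asm/page_types.h):

/*
 * Illustration only: PAGE_OFFSET and the physical-address width below
 * are assumed values, not the kernel's runtime constants.
 */
#include <stdio.h>

#define BIT(nr)      (1UL << (nr))
#define PAGE_SHIFT   12
#define PAGE_OFFSET  0xffff888000000000UL  /* assumed direct-map base */
#define PHYS_MASK    (BIT(52) - 1)         /* assumed 52 physical bits */

int main(void)
{
        unsigned long pfn   = 0x12345;            /* example poisoned pfn */
        unsigned long phys  = pfn << PAGE_SHIFT;
        unsigned long kaddr = phys + PAGE_OFFSET; /* what pfn_to_kaddr() gives */
        unsigned long decoy = phys + (PAGE_OFFSET ^ BIT(63));

        /* A __pa()-style conversion, sanitized the way the comment relies on. */
        unsigned long pa_kaddr = (kaddr - PAGE_OFFSET) & PHYS_MASK;
        unsigned long pa_decoy = (decoy - PAGE_OFFSET) & PHYS_MASK;

        printf("kaddr    = %#lx (canonical)\n", kaddr);
        printf("decoy    = %#lx (non-canonical: bit 63 flipped)\n", decoy);
        printf("pa_kaddr = %#lx\n", pa_kaddr);
        printf("pa_decoy = %#lx (same physical page)\n", pa_decoy);
        return 0;
}

Both conversions land on the same physical page, so set_memory_np() clears
the PTE we intended, while the decoy itself is non-canonical and simply
faults if anything ever dereferences it.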
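On usage: the hunk above adds out-of-line definitions and exports
clear_mce_nospec(), so modules that recover from poison can restore the 1:1
mapping afterwards. A hedged sketch of how a poison-handling path might
pair the two helpers; example_recover_page() is an invented stand-in for
whatever actually clears the hardware poison, not a kernel API:

/* Hypothetical caller, for illustration only. */
int example_recover_page(unsigned long pfn);    /* invented helper */

static void example_handle_poison(unsigned long pfn)
{
        /*
         * Unmap the pfn from the 1:1 map first, so that nothing, not
         * even a speculative load, can touch the poisoned cache line.
         */
        if (set_mce_nospec(pfn))
                return;

        if (example_recover_page(pfn))
                return;         /* still poisoned: leave it unmapped */

        /* Poison cleared: make the direct-map PTE present again. */
        clear_mce_nospec(pfn);
}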