|
6 | 6 | #ifndef _ASM_RISCV_PGTABLE_64_H |
7 | 7 | #define _ASM_RISCV_PGTABLE_64_H |
8 | 8 |
|
| 9 | +#include <linux/bits.h> |
9 | 10 | #include <linux/const.h> |
10 | 11 |
|
11 | 12 | extern bool pgtable_l4_enabled; |
@@ -65,6 +66,13 @@ typedef struct { |
65 | 66 |
|
66 | 67 | #define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t)) |
67 | 68 |
|
| 69 | +/* |
| 70 | + * rv64 PTE format: |
| 71 | + * | 63 | 62 61 | 60 54 | 53  10 | 9             8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
| 72 | + *   N      MT     RSV    PFN      reserved for SW   D   A   G   U   X   W   R   V
| 73 | + */ |
| 74 | +#define _PAGE_PFN_MASK GENMASK(53, 10) |
| 75 | + |
68 | 76 | static inline int pud_present(pud_t pud) |
69 | 77 | { |
70 | 78 | return (pud_val(pud) & _PAGE_PRESENT); |
@@ -108,12 +116,12 @@ static inline unsigned long _pud_pfn(pud_t pud) |
108 | 116 |
|
109 | 117 | static inline pmd_t *pud_pgtable(pud_t pud) |
110 | 118 | { |
111 | | - return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT); |
| 119 | + return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud))); |
112 | 120 | } |
113 | 121 |
|
114 | 122 | static inline struct page *pud_page(pud_t pud) |
115 | 123 | { |
116 | | - return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT); |
| 124 | + return pfn_to_page(__page_val_to_pfn(pud_val(pud))); |
117 | 125 | } |
118 | 126 |
|
119 | 127 | #define mm_p4d_folded mm_p4d_folded |
@@ -143,7 +151,7 @@ static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot) |
143 | 151 |
|
144 | 152 | static inline unsigned long _pmd_pfn(pmd_t pmd) |
145 | 153 | { |
146 | | - return pmd_val(pmd) >> _PAGE_PFN_SHIFT; |
| 154 | + return __page_val_to_pfn(pmd_val(pmd)); |
147 | 155 | } |
148 | 156 |
|
149 | 157 | #define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot) |
|
0 commit comments