Skip to content

Commit 4fbb7e7

Browse files
Authored by Matthew Wilcox (Oracle); committed by akpm00
xtensa: implement the new page table range API
Add PFN_PTE_SHIFT, update_mmu_cache_range(), flush_dcache_folio() and flush_icache_pages().

Link: https://lkml.kernel.org/r/20230802151406.3735276-30-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent a3e1c93 commit 4fbb7e7

3 files changed

Lines changed: 63 additions & 47 deletions

File tree

arch/xtensa/include/asm/cacheflush.h

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -119,8 +119,14 @@ void flush_cache_page(struct vm_area_struct*,
119119
#define flush_cache_vmap(start,end) flush_cache_all()
120120
#define flush_cache_vunmap(start,end) flush_cache_all()
121121

122+
void flush_dcache_folio(struct folio *folio);
123+
#define flush_dcache_folio flush_dcache_folio
124+
122125
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
123-
void flush_dcache_page(struct page *);
126+
static inline void flush_dcache_page(struct page *page)
127+
{
128+
flush_dcache_folio(page_folio(page));
129+
}
124130

125131
void local_flush_cache_range(struct vm_area_struct *vma,
126132
unsigned long start, unsigned long end);
@@ -156,6 +162,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
156162

157163
/* This is not required, see Documentation/core-api/cachetlb.rst */
158164
#define flush_icache_page(vma,page) do { } while (0)
165+
#define flush_icache_pages(vma, page, nr) do { } while (0)
159166

160167
#define flush_dcache_mmap_lock(mapping) do { } while (0)
161168
#define flush_dcache_mmap_unlock(mapping) do { } while (0)

arch/xtensa/include/asm/pgtable.h

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -274,6 +274,7 @@ static inline pte_t pte_mkwrite(pte_t pte)
274274
* and a page entry and page directory to the page they refer to.
275275
*/
276276

277+
#define PFN_PTE_SHIFT PAGE_SHIFT
277278
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
278279
#define pte_same(a,b) (pte_val(a) == pte_val(b))
279280
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -301,15 +302,9 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
301302

302303
struct mm_struct;
303304

304-
static inline void
305-
set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
306-
{
307-
update_pte(ptep, pteval);
308-
}
309-
310-
static inline void set_pte(pte_t *ptep, pte_t pteval)
305+
static inline void set_pte(pte_t *ptep, pte_t pte)
311306
{
312-
update_pte(ptep, pteval);
307+
update_pte(ptep, pte);
313308
}
314309

315310
static inline void
@@ -407,8 +402,11 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
407402

408403
#else
409404

410-
extern void update_mmu_cache(struct vm_area_struct * vma,
411-
unsigned long address, pte_t *ptep);
405+
struct vm_fault;
406+
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
407+
unsigned long address, pte_t *ptep, unsigned int nr);
408+
#define update_mmu_cache(vma, address, ptep) \
409+
update_mmu_cache_range(NULL, vma, address, ptep, 1)
412410

413411
typedef pte_t *pte_addr_t;
414412

arch/xtensa/mm/cache.c

Lines changed: 47 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -121,9 +121,9 @@ EXPORT_SYMBOL(copy_user_highpage);
121121
*
122122
*/
123123

124-
void flush_dcache_page(struct page *page)
124+
void flush_dcache_folio(struct folio *folio)
125125
{
126-
struct address_space *mapping = page_mapping_file(page);
126+
struct address_space *mapping = folio_flush_mapping(folio);
127127

128128
/*
129129
* If we have a mapping but the page is not mapped to user-space
@@ -132,14 +132,14 @@ void flush_dcache_page(struct page *page)
132132
*/
133133

134134
if (mapping && !mapping_mapped(mapping)) {
135-
if (!test_bit(PG_arch_1, &page->flags))
136-
set_bit(PG_arch_1, &page->flags);
135+
if (!test_bit(PG_arch_1, &folio->flags))
136+
set_bit(PG_arch_1, &folio->flags);
137137
return;
138138

139139
} else {
140-
141-
unsigned long phys = page_to_phys(page);
142-
unsigned long temp = page->index << PAGE_SHIFT;
140+
unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
141+
unsigned long temp = folio_pos(folio);
142+
unsigned int i, nr = folio_nr_pages(folio);
143143
unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
144144
unsigned long virt;
145145

@@ -154,22 +154,26 @@ void flush_dcache_page(struct page *page)
154154
return;
155155

156156
preempt_disable();
157-
virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
158-
__flush_invalidate_dcache_page_alias(virt, phys);
157+
for (i = 0; i < nr; i++) {
158+
virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
159+
__flush_invalidate_dcache_page_alias(virt, phys);
159160

160-
virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
161+
virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);
161162

162-
if (alias)
163-
__flush_invalidate_dcache_page_alias(virt, phys);
163+
if (alias)
164+
__flush_invalidate_dcache_page_alias(virt, phys);
164165

165-
if (mapping)
166-
__invalidate_icache_page_alias(virt, phys);
166+
if (mapping)
167+
__invalidate_icache_page_alias(virt, phys);
168+
phys += PAGE_SIZE;
169+
temp += PAGE_SIZE;
170+
}
167171
preempt_enable();
168172
}
169173

170174
/* There shouldn't be an entry in the cache for this page anymore. */
171175
}
172-
EXPORT_SYMBOL(flush_dcache_page);
176+
EXPORT_SYMBOL(flush_dcache_folio);
173177

174178
/*
175179
* For now, flush the whole cache. FIXME??
@@ -207,45 +211,52 @@ EXPORT_SYMBOL(local_flush_cache_page);
207211

208212
#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
209213

210-
void
211-
update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
214+
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
215+
unsigned long addr, pte_t *ptep, unsigned int nr)
212216
{
213217
unsigned long pfn = pte_pfn(*ptep);
214-
struct page *page;
218+
struct folio *folio;
219+
unsigned int i;
215220

216221
if (!pfn_valid(pfn))
217222
return;
218223

219-
page = pfn_to_page(pfn);
224+
folio = page_folio(pfn_to_page(pfn));
220225

221-
/* Invalidate old entry in TLBs */
222-
223-
flush_tlb_page(vma, addr);
226+
/* Invalidate old entries in TLBs */
227+
for (i = 0; i < nr; i++)
228+
flush_tlb_page(vma, addr + i * PAGE_SIZE);
229+
nr = folio_nr_pages(folio);
224230

225231
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
226232

227-
if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
228-
unsigned long phys = page_to_phys(page);
233+
if (!folio_test_reserved(folio) && test_bit(PG_arch_1, &folio->flags)) {
234+
unsigned long phys = folio_pfn(folio) * PAGE_SIZE;
229235
unsigned long tmp;
230236

231237
preempt_disable();
232-
tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
233-
__flush_invalidate_dcache_page_alias(tmp, phys);
234-
tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
235-
__flush_invalidate_dcache_page_alias(tmp, phys);
236-
__invalidate_icache_page_alias(tmp, phys);
238+
for (i = 0; i < nr; i++) {
239+
tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
240+
__flush_invalidate_dcache_page_alias(tmp, phys);
241+
tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
242+
__flush_invalidate_dcache_page_alias(tmp, phys);
243+
__invalidate_icache_page_alias(tmp, phys);
244+
phys += PAGE_SIZE;
245+
}
237246
preempt_enable();
238247

239-
clear_bit(PG_arch_1, &page->flags);
248+
clear_bit(PG_arch_1, &folio->flags);
240249
}
241250
#else
242-
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
251+
if (!folio_test_reserved(folio) && !test_bit(PG_arch_1, &folio->flags)
243252
&& (vma->vm_flags & VM_EXEC) != 0) {
244-
unsigned long paddr = (unsigned long)kmap_atomic(page);
245-
__flush_dcache_page(paddr);
246-
__invalidate_icache_page(paddr);
247-
set_bit(PG_arch_1, &page->flags);
248-
kunmap_atomic((void *)paddr);
253+
for (i = 0; i < nr; i++) {
254+
void *paddr = kmap_local_folio(folio, i * PAGE_SIZE);
255+
__flush_dcache_page((unsigned long)paddr);
256+
__invalidate_icache_page((unsigned long)paddr);
257+
kunmap_local(paddr);
258+
}
259+
set_bit(PG_arch_1, &folio->flags);
249260
}
250261
#endif
251262
}

0 commit comments

Comments (0)