Skip to content

Commit 5003a2b

Browse files
Matthew Wilcox (Oracle) authored and akpm00 committed
mm: call update_mmu_cache_range() in more page fault handling paths
Pass the vm_fault to the architecture to help it make smarter decisions about which PTEs to insert into the TLB.

Link: https://lkml.kernel.org/r/20230802151406.3735276-39-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 617c28e commit 5003a2b

1 file changed

Lines changed: 8 additions & 7 deletions

File tree

mm/memory.c

Lines changed: 8 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -2862,7 +2862,7 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src,
28622862

28632863
entry = pte_mkyoung(vmf->orig_pte);
28642864
if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2865-
update_mmu_cache(vma, addr, vmf->pte);
2865+
update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
28662866
}
28672867

28682868
/*
@@ -3039,7 +3039,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
30393039
entry = pte_mkyoung(vmf->orig_pte);
30403040
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
30413041
if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3042-
update_mmu_cache(vma, vmf->address, vmf->pte);
3042+
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
30433043
pte_unmap_unlock(vmf->pte, vmf->ptl);
30443044
count_vm_event(PGREUSE);
30453045
}
@@ -3163,7 +3163,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
31633163
*/
31643164
BUG_ON(unshare && pte_write(entry));
31653165
set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3166-
update_mmu_cache(vma, vmf->address, vmf->pte);
3166+
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
31673167
if (old_folio) {
31683168
/*
31693169
* Only after switching the pte to the new page may
@@ -4046,7 +4046,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
40464046
}
40474047

40484048
/* No need to invalidate - it was non-present before */
4049-
update_mmu_cache(vma, vmf->address, vmf->pte);
4049+
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
40504050
unlock:
40514051
if (vmf->pte)
40524052
pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -4170,7 +4170,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
41704170
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
41714171

41724172
/* No need to invalidate - it was non-present before */
4173-
update_mmu_cache(vma, vmf->address, vmf->pte);
4173+
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
41744174
unlock:
41754175
if (vmf->pte)
41764176
pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -4859,7 +4859,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
48594859
if (writable)
48604860
pte = pte_mkwrite(pte);
48614861
ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4862-
update_mmu_cache(vma, vmf->address, vmf->pte);
4862+
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
48634863
pte_unmap_unlock(vmf->pte, vmf->ptl);
48644864
goto out;
48654865
}
@@ -5030,7 +5030,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
50305030
entry = pte_mkyoung(entry);
50315031
if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
50325032
vmf->flags & FAULT_FLAG_WRITE)) {
5033-
update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
5033+
update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5034+
vmf->pte, 1);
50345035
} else {
50355036
/* Skip spurious TLB flush for retried page fault */
50365037
if (vmf->flags & FAULT_FLAG_TRIED)

0 commit comments

Comments (0)