Skip to content

Commit 7f76091

Browse files
davidhildenbrand authored and torvalds committed
mm/huge_memory: remove stale locking logic from __split_huge_pmd()
Let's remove the stale logic that was required for reuse_swap_page(). [akpm@linux-foundation.org: simplification, per Yang Shi] Link: https://lkml.kernel.org/r/20220131162940.210846-10-david@redhat.com Signed-off-by: David Hildenbrand <david@redhat.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Christoph Hellwig <hch@lst.de> Cc: David Rientjes <rientjes@google.com> Cc: Don Dutile <ddutile@redhat.com> Cc: Hugh Dickins <hughd@google.com> Cc: Jan Kara <jack@suse.cz> Cc: Jann Horn <jannh@google.com> Cc: Jason Gunthorpe <jgg@nvidia.com> Cc: John Hubbard <jhubbard@nvidia.com> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Liang Zhang <zhangliang5@huawei.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Michal Hocko <mhocko@kernel.org> Cc: Mike Kravetz <mike.kravetz@oracle.com> Cc: Mike Rapoport <rppt@linux.ibm.com> Cc: Nadav Amit <nadav.amit@gmail.com> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Peter Xu <peterx@redhat.com> Cc: Rik van Riel <riel@surriel.com> Cc: Roman Gushchin <roman.gushchin@linux.dev> Cc: Shakeel Butt <shakeelb@google.com> Cc: Yang Shi <shy828301@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 55c62fa commit 7f76091

1 file changed

Lines changed: 4 additions & 36 deletions

File tree

mm/huge_memory.c

Lines changed: 4 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -2133,8 +2133,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
21332133
{
21342134
spinlock_t *ptl;
21352135
struct mmu_notifier_range range;
2136-
bool do_unlock_folio = false;
2137-
pmd_t _pmd;
21382136

21392137
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
21402138
address & HPAGE_PMD_MASK,
@@ -2153,42 +2151,12 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
21532151
goto out;
21542152
}
21552153

2156-
repeat:
2157-
if (pmd_trans_huge(*pmd)) {
2158-
if (!folio) {
2159-
folio = page_folio(pmd_page(*pmd));
2160-
/*
2161-
* An anonymous page must be locked, to ensure that a
2162-
* concurrent reuse_swap_page() sees stable mapcount;
2163-
* but reuse_swap_page() is not used on shmem or file,
2164-
* and page lock must not be taken when zap_pmd_range()
2165-
* calls __split_huge_pmd() while i_mmap_lock is held.
2166-
*/
2167-
if (folio_test_anon(folio)) {
2168-
if (unlikely(!folio_trylock(folio))) {
2169-
folio_get(folio);
2170-
_pmd = *pmd;
2171-
spin_unlock(ptl);
2172-
folio_lock(folio);
2173-
spin_lock(ptl);
2174-
if (unlikely(!pmd_same(*pmd, _pmd))) {
2175-
folio_unlock(folio);
2176-
folio_put(folio);
2177-
folio = NULL;
2178-
goto repeat;
2179-
}
2180-
folio_put(folio);
2181-
}
2182-
do_unlock_folio = true;
2183-
}
2184-
}
2185-
} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
2186-
goto out;
2187-
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
2154+
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2155+
is_pmd_migration_entry(*pmd))
2156+
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
2157+
21882158
out:
21892159
spin_unlock(ptl);
2190-
if (do_unlock_folio)
2191-
folio_unlock(folio);
21922160
/*
21932161
* No need to double call mmu_notifier->invalidate_range() callback.
21942162
* They are 3 cases to consider inside __split_huge_pmd_locked():

0 commit comments

Comments (0)