Skip to content

Commit 7b71205

Browse files
rpptakpm00
authored and committed
kho: fix restoring of contiguous ranges of order-0 pages
When contiguous ranges of order-0 pages are restored, kho_restore_page() calls prep_compound_page() with the first page in the range and order as parameters and then kho_restore_pages() calls split_page() to make sure all pages in the range are order-0. However, since split_page() is not intended to split compound pages and with VM_DEBUG enabled it will trigger a VM_BUG_ON_PAGE(). Update kho_restore_page() so that it will use prep_compound_page() when it restores a folio and make sure it properly sets page count for both large folios and ranges of order-0 pages. Link: https://lkml.kernel.org/r/20251125110917.843744-3-rppt@kernel.org Fixes: a667300 ("kho: add support for preserving vmalloc allocations") Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Reported-by: Pratyush Yadav <pratyush@kernel.org> Cc: Alexander Graf <graf@amazon.com> Cc: Pasha Tatashin <pasha.tatashin@soleen.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 4bc84cd commit 7b71205

1 file changed

Lines changed: 12 additions & 8 deletions

File tree

kernel/liveupdate/kexec_handover.c

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -219,11 +219,11 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
219219
return 0;
220220
}
221221

222-
static struct page *kho_restore_page(phys_addr_t phys)
222+
static struct page *kho_restore_page(phys_addr_t phys, bool is_folio)
223223
{
224224
struct page *page = pfn_to_online_page(PHYS_PFN(phys));
225+
unsigned int nr_pages, ref_cnt;
225226
union kho_page_info info;
226-
unsigned int nr_pages;
227227

228228
if (!page)
229229
return NULL;
@@ -243,11 +243,16 @@ static struct page *kho_restore_page(phys_addr_t phys)
243243
/* Head page gets refcount of 1. */
244244
set_page_count(page, 1);
245245

246-
/* For higher order folios, tail pages get a page count of zero. */
246+
/*
247+
* For higher order folios, tail pages get a page count of zero.
248+
* For physically contiguous order-0 pages every pages gets a page
249+
* count of 1
250+
*/
251+
ref_cnt = is_folio ? 0 : 1;
247252
for (unsigned int i = 1; i < nr_pages; i++)
248-
set_page_count(page + i, 0);
253+
set_page_count(page + i, ref_cnt);
249254

250-
if (info.order > 0)
255+
if (is_folio && info.order)
251256
prep_compound_page(page, info.order);
252257

253258
adjust_managed_page_count(page, nr_pages);
@@ -262,7 +267,7 @@ static struct page *kho_restore_page(phys_addr_t phys)
262267
*/
263268
struct folio *kho_restore_folio(phys_addr_t phys)
264269
{
265-
struct page *page = kho_restore_page(phys);
270+
struct page *page = kho_restore_page(phys, true);
266271

267272
return page ? page_folio(page) : NULL;
268273
}
@@ -287,11 +292,10 @@ struct page *kho_restore_pages(phys_addr_t phys, unsigned int nr_pages)
287292
while (pfn < end_pfn) {
288293
const unsigned int order =
289294
min(count_trailing_zeros(pfn), ilog2(end_pfn - pfn));
290-
struct page *page = kho_restore_page(PFN_PHYS(pfn));
295+
struct page *page = kho_restore_page(PFN_PHYS(pfn), false);
291296

292297
if (!page)
293298
return NULL;
294-
split_page(page, order);
295299
pfn += 1 << order;
296300
}
297301

0 commit comments

Comments (0)