Skip to content

Commit 377bf66

Browse files
committed
Revert "mm: fix initialization of struct page for holes in memory layout"
This reverts commit d3921cb.

Chris Wilson reports that it causes boot problems: "We have half a dozen or so different machines in CI that are silently failing to boot, that we believe is bisected to this patch" and the CI team confirmed that a revert fixed the issues.

The cause is unknown for now, so let's revert it.

Link: https://lore.kernel.org/lkml/161160687463.28991.354987542182281928@build.alporthouse.com/
Reported-and-tested-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 13391c6 commit 377bf66

1 file changed

Lines changed: 34 additions & 50 deletions

File tree

mm/page_alloc.c

Lines changed: 34 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -7080,26 +7080,23 @@ void __init free_area_init_memoryless_node(int nid)
70807080
* Initialize all valid struct pages in the range [spfn, epfn) and mark them
70817081
* PageReserved(). Return the number of struct pages that were initialized.
70827082
*/
7083-
static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
7084-
int zone, int nid)
7083+
static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
70857084
{
7086-
unsigned long pfn, zone_spfn, zone_epfn;
7085+
unsigned long pfn;
70877086
u64 pgcnt = 0;
70887087

7089-
zone_spfn = arch_zone_lowest_possible_pfn[zone];
7090-
zone_epfn = arch_zone_highest_possible_pfn[zone];
7091-
7092-
spfn = clamp(spfn, zone_spfn, zone_epfn);
7093-
epfn = clamp(epfn, zone_spfn, zone_epfn);
7094-
70957088
for (pfn = spfn; pfn < epfn; pfn++) {
70967089
if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
70977090
pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
70987091
+ pageblock_nr_pages - 1;
70997092
continue;
71007093
}
7101-
7102-
__init_single_page(pfn_to_page(pfn), pfn, zone, nid);
7094+
/*
7095+
* Use a fake node/zone (0) for now. Some of these pages
7096+
* (in memblock.reserved but not in memblock.memory) will
7097+
* get re-initialized via reserve_bootmem_region() later.
7098+
*/
7099+
__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
71037100
__SetPageReserved(pfn_to_page(pfn));
71047101
pgcnt++;
71057102
}
@@ -7108,64 +7105,51 @@ static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
71087105
}
71097106

71107107
/*
7111-
* Only struct pages that correspond to ranges defined by memblock.memory
7112-
* are zeroed and initialized by going through __init_single_page() during
7113-
* memmap_init().
7114-
*
7115-
* But, there could be struct pages that correspond to holes in
7116-
* memblock.memory. This can happen because of the following reasons:
7117-
* - physical memory bank size is not necessarily the exact multiple of the
7118-
* arbitrary section size
7119-
* - early reserved memory may not be listed in memblock.memory
7120-
* - memory layouts defined with memmap= kernel parameter may not align
7121-
* nicely with memmap sections
7108+
* Only struct pages that are backed by physical memory are zeroed and
7109+
* initialized by going through __init_single_page(). But, there are some
7110+
* struct pages which are reserved in memblock allocator and their fields
7111+
* may be accessed (for example page_to_pfn() on some configuration accesses
7112+
* flags). We must explicitly initialize those struct pages.
71227113
*
7123-
* Explicitly initialize those struct pages so that:
7124-
* - PG_Reserved is set
7125-
* - zone link is set according to the architecture constraints
7126-
* - node is set to node id of the next populated region except for the
7127-
* trailing hole where last node id is used
7114+
* This function also addresses a similar issue where struct pages are left
7115+
* uninitialized because the physical address range is not covered by
7116+
* memblock.memory or memblock.reserved. That could happen when memblock
7117+
* layout is manually configured via memmap=, or when the highest physical
7118+
* address (max_pfn) does not end on a section boundary.
71287119
*/
7129-
static void __init init_zone_unavailable_mem(int zone)
7120+
static void __init init_unavailable_mem(void)
71307121
{
7131-
unsigned long start, end;
7132-
int i, nid;
7133-
u64 pgcnt;
7134-
unsigned long next = 0;
7122+
phys_addr_t start, end;
7123+
u64 i, pgcnt;
7124+
phys_addr_t next = 0;
71357125

71367126
/*
7137-
* Loop through holes in memblock.memory and initialize struct
7138-
* pages corresponding to these holes
7127+
* Loop through unavailable ranges not covered by memblock.memory.
71397128
*/
71407129
pgcnt = 0;
7141-
for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7130+
for_each_mem_range(i, &start, &end) {
71427131
if (next < start)
7143-
pgcnt += init_unavailable_range(next, start, zone, nid);
7132+
pgcnt += init_unavailable_range(PFN_DOWN(next),
7133+
PFN_UP(start));
71447134
next = end;
71457135
}
71467136

71477137
/*
7148-
* Last section may surpass the actual end of memory (e.g. we can
7149-
* have 1Gb section and 512Mb of RAM populated).
7150-
* Make sure that memmap has a well defined state in this case.
7138+
* Early sections always have a fully populated memmap for the whole
7139+
* section - see pfn_valid(). If the last section has holes at the
7140+
* end and that section is marked "online", the memmap will be
7141+
* considered initialized. Make sure that memmap has a well defined
7142+
* state.
71517143
*/
7152-
end = round_up(max_pfn, PAGES_PER_SECTION);
7153-
pgcnt += init_unavailable_range(next, end, zone, nid);
7144+
pgcnt += init_unavailable_range(PFN_DOWN(next),
7145+
round_up(max_pfn, PAGES_PER_SECTION));
71547146

71557147
/*
71567148
* Struct pages that do not have backing memory. This could be because
71577149
* firmware is using some of this memory, or for some other reasons.
71587150
*/
71597151
if (pgcnt)
7160-
pr_info("Zone %s: zeroed struct page in unavailable ranges: %lld pages", zone_names[zone], pgcnt);
7161-
}
7162-
7163-
static void __init init_unavailable_mem(void)
7164-
{
7165-
int zone;
7166-
7167-
for (zone = 0; zone < ZONE_MOVABLE; zone++)
7168-
init_zone_unavailable_mem(zone);
7152+
pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
71697153
}
71707154
#else
71717155
static inline void __init init_unavailable_mem(void)

0 commit comments

Comments
 (0)