Skip to content

Commit 3acb913

Browse files
committed
mm/mm_init: use deferred_init_memmap_chunk() in deferred_grow_zone()

deferred_grow_zone() initializes one or more sections in the memory map if
buddy runs out of initialized struct pages when
CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled. It loops through memblock
regions and initializes and frees pages in MAX_ORDER_NR_PAGES chunks.

Essentially the same loop is implemented in deferred_init_memmap_chunk();
the only actual difference is that deferred_init_memmap_chunk() does not
count initialized pages.

Make deferred_init_memmap_chunk() count the initialized pages and return
their number, wrap it with deferred_init_memmap_job() for multithreaded
initialization with padata_do_multithreaded(), and replace the open-coded
initialization of struct pages in deferred_grow_zone() with a call to
deferred_init_memmap_chunk().

Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
1 parent b320789 commit 3acb913

1 file changed

Lines changed: 36 additions & 34 deletions

File tree

mm/mm_init.c

Lines changed: 36 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -2134,12 +2134,12 @@ deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
21342134
return nr_pages;
21352135
}
21362136

2137-
static void __init
2137+
static unsigned long __init
21382138
deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
2139-
void *arg)
2139+
struct zone *zone)
21402140
{
2141+
unsigned long nr_pages = 0;
21412142
unsigned long spfn, epfn;
2142-
struct zone *zone = arg;
21432143
u64 i = 0;
21442144

21452145
deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
@@ -2149,9 +2149,23 @@ deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
21492149
* we can avoid introducing any issues with the buddy allocator.
21502150
*/
21512151
while (spfn < end_pfn) {
2152-
deferred_init_maxorder(&i, zone, &spfn, &epfn);
2153-
cond_resched();
2152+
nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2153+
if (irqs_disabled())
2154+
touch_nmi_watchdog();
2155+
else
2156+
cond_resched();
21542157
}
2158+
2159+
return nr_pages;
2160+
}
2161+
2162+
static void __init
2163+
deferred_init_memmap_job(unsigned long start_pfn, unsigned long end_pfn,
2164+
void *arg)
2165+
{
2166+
struct zone *zone = arg;
2167+
2168+
deferred_init_memmap_chunk(start_pfn, end_pfn, zone);
21552169
}
21562170

21572171
static unsigned int __init
@@ -2204,7 +2218,7 @@ static int __init deferred_init_memmap(void *data)
22042218
while (deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, first_init_pfn)) {
22052219
first_init_pfn = ALIGN(epfn, PAGES_PER_SECTION);
22062220
struct padata_mt_job job = {
2207-
.thread_fn = deferred_init_memmap_chunk,
2221+
.thread_fn = deferred_init_memmap_job,
22082222
.fn_arg = zone,
22092223
.start = spfn,
22102224
.size = first_init_pfn - spfn,
@@ -2240,12 +2254,11 @@ static int __init deferred_init_memmap(void *data)
22402254
*/
22412255
bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
22422256
{
2243-
unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2257+
unsigned long nr_pages_needed = SECTION_ALIGN_UP(1 << order);
22442258
pg_data_t *pgdat = zone->zone_pgdat;
22452259
unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
22462260
unsigned long spfn, epfn, flags;
22472261
unsigned long nr_pages = 0;
2248-
u64 i = 0;
22492262

22502263
/* Only the last zone may have deferred pages */
22512264
if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
@@ -2262,37 +2275,26 @@ bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
22622275
return true;
22632276
}
22642277

2265-
/* If the zone is empty somebody else may have cleared out the zone */
2266-
if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2267-
first_deferred_pfn)) {
2268-
pgdat->first_deferred_pfn = ULONG_MAX;
2269-
pgdat_resize_unlock(pgdat, &flags);
2270-
/* Retry only once. */
2271-
return first_deferred_pfn != ULONG_MAX;
2278+
/*
2279+
* Initialize at least nr_pages_needed in section chunks.
2280+
* If a section has less free memory than nr_pages_needed, the next
2281+
* section will be also initialized.
2282+
* Note, that it still does not guarantee that allocation of order can
2283+
* be satisfied if the sections are fragmented because of memblock
2284+
* allocations.
2285+
*/
2286+
for (spfn = first_deferred_pfn, epfn = SECTION_ALIGN_UP(spfn + 1);
2287+
nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone);
2288+
spfn = epfn, epfn += PAGES_PER_SECTION) {
2289+
nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone);
22722290
}
22732291

22742292
/*
2275-
* Initialize and free pages in MAX_PAGE_ORDER sized increments so
2276-
* that we can avoid introducing any issues with the buddy
2277-
* allocator.
2293+
* There were no pages to initialize and free which means the zone's
2294+
* memory map is completely initialized.
22782295
*/
2279-
while (spfn < epfn) {
2280-
/* update our first deferred PFN for this section */
2281-
first_deferred_pfn = spfn;
2282-
2283-
nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2284-
touch_nmi_watchdog();
2285-
2286-
/* We should only stop along section boundaries */
2287-
if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2288-
continue;
2289-
2290-
/* If our quota has been met we can stop here */
2291-
if (nr_pages >= nr_pages_needed)
2292-
break;
2293-
}
2296+
pgdat->first_deferred_pfn = nr_pages ? spfn : ULONG_MAX;
22942297

2295-
pgdat->first_deferred_pfn = spfn;
22962298
pgdat_resize_unlock(pgdat, &flags);
22972299

22982300
return nr_pages > 0;

0 commit comments

Comments
 (0)