Skip to content

Commit 10e0f75

Browse files
YWHyukakpm00
authored and committed
mm/page_alloc: fix tracepoint mm_page_alloc_zone_locked()
Currently, trace point mm_page_alloc_zone_locked() doesn't show correct information. First, when alloc_flag has ALLOC_HARDER/ALLOC_CMA, page can be allocated from MIGRATE_HIGHATOMIC/MIGRATE_CMA. Nevertheless, tracepoint use requested migration type not MIGRATE_HIGHATOMIC and MIGRATE_CMA. Second, after commit 44042b4 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists") percpu-list can store high order pages. But trace point determine whether it is a refiil of percpu-list by comparing requested order and 0. To handle these problems, make mm_page_alloc_zone_locked() only be called by __rmqueue_smallest with correct migration type. With a new argument called percpu_refill, it can show roughly whether it is a refill of percpu-list. Link: https://lkml.kernel.org/r/20220512025307.57924-1-vvghjk1234@gmail.com Signed-off-by: Wonhyuk Yang <vvghjk1234@gmail.com> Acked-by: Mel Gorman <mgorman@suse.de> Cc: Baik Song An <bsahn@etri.re.kr> Cc: Hong Yeon Kim <kimhy@etri.re.kr> Cc: Taeung Song <taeung@reallinux.co.kr> Cc: <linuxgeek@linuxgeek.io> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Ingo Molnar <mingo@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
1 parent 3645b5e commit 10e0f75

2 files changed

Lines changed: 14 additions & 13 deletions

File tree

include/trace/events/kmem.h

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -229,35 +229,39 @@ TRACE_EVENT(mm_page_alloc,
 
 DECLARE_EVENT_CLASS(mm_page,
 
-	TP_PROTO(struct page *page, unsigned int order, int migratetype),
+	TP_PROTO(struct page *page, unsigned int order, int migratetype,
+		 int percpu_refill),
 
-	TP_ARGS(page, order, migratetype),
+	TP_ARGS(page, order, migratetype, percpu_refill),
 
 	TP_STRUCT__entry(
 		__field(	unsigned long,	pfn		)
 		__field(	unsigned int,	order		)
 		__field(	int,		migratetype	)
+		__field(	int,		percpu_refill	)
 	),
 
 	TP_fast_assign(
 		__entry->pfn		= page ? page_to_pfn(page) : -1UL;
 		__entry->order		= order;
 		__entry->migratetype	= migratetype;
+		__entry->percpu_refill	= percpu_refill;
 	),
 
 	TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d",
 		__entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL,
 		__entry->pfn != -1UL ? __entry->pfn : 0,
 		__entry->order,
 		__entry->migratetype,
-		__entry->order == 0)
+		__entry->percpu_refill)
 );
 
 DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
 
-	TP_PROTO(struct page *page, unsigned int order, int migratetype),
+	TP_PROTO(struct page *page, unsigned int order, int migratetype,
+		 int percpu_refill),
 
-	TP_ARGS(page, order, migratetype)
+	TP_ARGS(page, order, migratetype, percpu_refill)
 );
 
 TRACE_EVENT(mm_page_pcpu_drain,

mm/page_alloc.c

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2466,6 +2466,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 		del_page_from_free_list(page, zone, current_order);
 		expand(zone, page, order, current_order, migratetype);
 		set_pcppage_migratetype(page, migratetype);
+		trace_mm_page_alloc_zone_locked(page, order, migratetype,
+				pcp_allowed_order(order) &&
+				migratetype < MIGRATE_PCPTYPES);
 		return page;
 	}
 
@@ -2989,7 +2992,7 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 				zone_page_state(zone, NR_FREE_PAGES) / 2) {
 			page = __rmqueue_cma_fallback(zone, order);
 			if (page)
-				goto out;
+				return page;
 		}
 	}
 retry:
@@ -3002,9 +3005,6 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 								alloc_flags))
 			goto retry;
 	}
-out:
-	if (page)
-		trace_mm_page_alloc_zone_locked(page, order, migratetype);
 	return page;
 }
 
@@ -3723,11 +3723,8 @@ struct page *rmqueue(struct zone *preferred_zone,
 		 * reserved for high-order atomic allocation, so order-0
 		 * request should skip it.
 		 */
-		if (order > 0 && alloc_flags & ALLOC_HARDER) {
+		if (order > 0 && alloc_flags & ALLOC_HARDER)
 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
-			if (page)
-				trace_mm_page_alloc_zone_locked(page, order, migratetype);
-		}
 		if (!page) {
 			page = __rmqueue(zone, order, migratetype, alloc_flags);
 			if (!page)

0 commit comments

Comments
 (0)