Skip to content

Commit cfcaa66

Browse files
Ben Widawsky authored and torvalds committed
mm/hugetlb: add support for mempolicy MPOL_PREFERRED_MANY
Implement the missing huge page allocation functionality while obeying the preferred node semantics. This is similar to the implementation for general page allocation, as it uses a fallback mechanism to try multiple preferred nodes first, and then all other nodes. To avoid adding too many "#ifdef CONFIG_NUMA" checks, add a helper function in mempolicy.h to check whether a mempolicy is MPOL_PREFERRED_MANY. [akpm@linux-foundation.org: fix compiling issue when merging with other hugetlb patch] [Thanks to 0day bot for catching the !CONFIG_NUMA compiling issue] [mhocko@suse.com: suggest to remove the #ifdef CONFIG_NUMA check] [ben.widawsky@intel.com: add helpers to avoid ifdefs] Link: https://lore.kernel.org/r/20200630212517.308045-12-ben.widawsky@intel.com Link: https://lkml.kernel.org/r/1627970362-61305-4-git-send-email-feng.tang@intel.com Link: https://lkml.kernel.org/r/20210809024430.GA46432@shbuild999.sh.intel.com [nathan@kernel.org: initialize page to NULL in alloc_buddy_huge_page_with_mpol()] Link: https://lkml.kernel.org/r/20210810200632.3812797-1-nathan@kernel.org Link: https://lore.kernel.org/r/20200630212517.308045-12-ben.widawsky@intel.com Link: https://lkml.kernel.org/r/1627970362-61305-4-git-send-email-feng.tang@intel.com Link: https://lkml.kernel.org/r/20210809024430.GA46432@shbuild999.sh.intel.com Signed-off-by: Ben Widawsky <ben.widawsky@intel.com> Signed-off-by: Feng Tang <feng.tang@intel.com> Signed-off-by: Nathan Chancellor <nathan@kernel.org> Co-developed-by: Feng Tang <feng.tang@intel.com> Suggested-by: Michal Hocko <mhocko@suse.com> Acked-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 4c54d94 commit cfcaa66

2 files changed

Lines changed: 37 additions & 5 deletions

File tree

include/linux/mempolicy.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -186,6 +186,12 @@ extern void mpol_put_task_policy(struct task_struct *);
186186

187187
extern bool numa_demotion_enabled;
188188

189+
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
190+
{
191+
return (pol->mode == MPOL_PREFERRED_MANY);
192+
}
193+
194+
189195
#else
190196

191197
struct mempolicy {};
@@ -296,5 +302,11 @@ static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
296302
}
297303

298304
#define numa_demotion_enabled false
305+
306+
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
307+
{
308+
return false;
309+
}
310+
299311
#endif /* CONFIG_NUMA */
300312
#endif

mm/hugetlb.c

Lines changed: 25 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1145,7 +1145,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
11451145
unsigned long address, int avoid_reserve,
11461146
long chg)
11471147
{
1148-
struct page *page;
1148+
struct page *page = NULL;
11491149
struct mempolicy *mpol;
11501150
gfp_t gfp_mask;
11511151
nodemask_t *nodemask;
@@ -1166,7 +1166,17 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
11661166

11671167
gfp_mask = htlb_alloc_mask(h);
11681168
nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1169-
page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1169+
1170+
if (mpol_is_preferred_many(mpol)) {
1171+
page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1172+
1173+
/* Fallback to all nodes if page==NULL */
1174+
nodemask = NULL;
1175+
}
1176+
1177+
if (!page)
1178+
page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1179+
11701180
if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
11711181
SetHPageRestoreReserve(page);
11721182
h->resv_huge_pages--;
@@ -2142,16 +2152,26 @@ static
21422152
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
21432153
struct vm_area_struct *vma, unsigned long addr)
21442154
{
2145-
struct page *page;
2155+
struct page *page = NULL;
21462156
struct mempolicy *mpol;
21472157
gfp_t gfp_mask = htlb_alloc_mask(h);
21482158
int nid;
21492159
nodemask_t *nodemask;
21502160

21512161
nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2152-
page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
2153-
mpol_cond_put(mpol);
2162+
if (mpol_is_preferred_many(mpol)) {
2163+
gfp_t gfp = gfp_mask | __GFP_NOWARN;
2164+
2165+
gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2166+
page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);
21542167

2168+
/* Fallback to all nodes if page==NULL */
2169+
nodemask = NULL;
2170+
}
2171+
2172+
if (!page)
2173+
page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
2174+
mpol_cond_put(mpol);
21552175
return page;
21562176
}
21572177

0 commit comments

Comments
 (0)