Skip to content

Commit b45e2da

Browse files
committed
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "10 patches.

  Subsystems affected by this patch series: MAINTAINERS and mm (slub,
  pagealloc, memcg, kasan, vmalloc, migration, hugetlb, memory-failure,
  and process_vm_access)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/process_vm_access.c: include compat.h
  mm,hwpoison: fix printing of page flags
  MAINTAINERS: add Vlastimil as slab allocators maintainer
  mm/hugetlb: fix potential missing huge page size info
  mm: migrate: initialize err in do_migrate_pages
  mm/vmalloc.c: fix potential memory leak
  arm/kasan: fix the array size of kasan_early_shadow_pte[]
  mm/memcontrol: fix warning in mem_cgroup_page_lruvec()
  mm/page_alloc: add a missing mm_page_alloc_zone_locked() tracepoint
  mm, slub: consider rest of partial list if acquire_slab() fails
2 parents 8cbe71e + eb351d7 commit b45e2da

11 files changed

Lines changed: 33 additions & 23 deletions

File tree

MAINTAINERS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16313,6 +16313,7 @@ M: Pekka Enberg <penberg@kernel.org>
1631316313
M: David Rientjes <rientjes@google.com>
1631416314
M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
1631516315
M: Andrew Morton <akpm@linux-foundation.org>
16316+
M: Vlastimil Babka <vbabka@suse.cz>
1631616317
L: linux-mm@kvack.org
1631716318
S: Maintained
1631816319
F: include/linux/sl?b*.h

include/linux/kasan.h

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,12 @@ struct kunit_kasan_expectation {
3535
#define KASAN_SHADOW_INIT 0
3636
#endif
3737

38+
#ifndef PTE_HWTABLE_PTRS
39+
#define PTE_HWTABLE_PTRS 0
40+
#endif
41+
3842
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
39-
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
43+
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
4044
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
4145
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
4246
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

include/linux/memcontrol.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -665,7 +665,7 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
665665
{
666666
struct mem_cgroup *memcg = page_memcg(page);
667667

668-
VM_WARN_ON_ONCE_PAGE(!memcg, page);
668+
VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
669669
return mem_cgroup_lruvec(memcg, pgdat);
670670
}
671671

mm/hugetlb.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4371,7 +4371,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
43714371
* So we need to block hugepage fault by PG_hwpoison bit check.
43724372
*/
43734373
if (unlikely(PageHWPoison(page))) {
4374-
ret = VM_FAULT_HWPOISON |
4374+
ret = VM_FAULT_HWPOISON_LARGE |
43754375
VM_FAULT_SET_HINDEX(hstate_index(h));
43764376
goto backout_unlocked;
43774377
}

mm/kasan/init.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,8 @@ static inline bool kasan_pmd_table(pud_t pud)
6464
return false;
6565
}
6666
#endif
67-
pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
67+
pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
68+
__page_aligned_bss;
6869

6970
static inline bool kasan_pte_table(pmd_t pmd)
7071
{

mm/memory-failure.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1940,7 +1940,7 @@ int soft_offline_page(unsigned long pfn, int flags)
19401940
goto retry;
19411941
}
19421942
} else if (ret == -EIO) {
1943-
pr_info("%s: %#lx: unknown page type: %lx (%pGP)\n",
1943+
pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
19441944
__func__, pfn, page->flags, &page->flags);
19451945
}
19461946

mm/mempolicy.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1111,7 +1111,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11111111
const nodemask_t *to, int flags)
11121112
{
11131113
int busy = 0;
1114-
int err;
1114+
int err = 0;
11151115
nodemask_t tmp;
11161116

11171117
migrate_prep();

mm/page_alloc.c

Lines changed: 16 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -2862,20 +2862,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
28622862
{
28632863
struct page *page;
28642864

2865-
#ifdef CONFIG_CMA
2866-
/*
2867-
* Balance movable allocations between regular and CMA areas by
2868-
* allocating from CMA when over half of the zone's free memory
2869-
* is in the CMA area.
2870-
*/
2871-
if (alloc_flags & ALLOC_CMA &&
2872-
zone_page_state(zone, NR_FREE_CMA_PAGES) >
2873-
zone_page_state(zone, NR_FREE_PAGES) / 2) {
2874-
page = __rmqueue_cma_fallback(zone, order);
2875-
if (page)
2876-
return page;
2865+
if (IS_ENABLED(CONFIG_CMA)) {
2866+
/*
2867+
* Balance movable allocations between regular and CMA areas by
2868+
* allocating from CMA when over half of the zone's free memory
2869+
* is in the CMA area.
2870+
*/
2871+
if (alloc_flags & ALLOC_CMA &&
2872+
zone_page_state(zone, NR_FREE_CMA_PAGES) >
2873+
zone_page_state(zone, NR_FREE_PAGES) / 2) {
2874+
page = __rmqueue_cma_fallback(zone, order);
2875+
if (page)
2876+
goto out;
2877+
}
28772878
}
2878-
#endif
28792879
retry:
28802880
page = __rmqueue_smallest(zone, order, migratetype);
28812881
if (unlikely(!page)) {
@@ -2886,8 +2886,9 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
28862886
alloc_flags))
28872887
goto retry;
28882888
}
2889-
2890-
trace_mm_page_alloc_zone_locked(page, order, migratetype);
2889+
out:
2890+
if (page)
2891+
trace_mm_page_alloc_zone_locked(page, order, migratetype);
28912892
return page;
28922893
}
28932894

mm/process_vm_access.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#include <linux/mm.h>
1010
#include <linux/uio.h>
1111
#include <linux/sched.h>
12+
#include <linux/compat.h>
1213
#include <linux/sched/mm.h>
1314
#include <linux/highmem.h>
1415
#include <linux/ptrace.h>

mm/slub.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1973,7 +1973,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
19731973

19741974
t = acquire_slab(s, n, page, object == NULL, &objects);
19751975
if (!t)
1976-
break;
1976+
continue; /* cmpxchg raced */
19771977

19781978
available += objects;
19791979
if (!object) {

0 commit comments

Comments (0)