Skip to content

Commit 77fb622

Browse files
committed
Merge tag 'mm-hotfixes-stable-2022-05-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull hotfixes from Andrew Morton:
 "Six hotfixes. The page_table_check one from Miaohe Lin is considered a
  minor thing so it isn't marked for -stable. The remainder address
  pre-5.19 issues and are cc:stable"

* tag 'mm-hotfixes-stable-2022-05-27' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/page_table_check: fix accessing unmapped ptep
  kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]
  mm/page_alloc: always attempt to allocate at least one page during bulk allocation
  hugetlb: fix huge_pmd_unshare address update
  zsmalloc: fix races between asynchronous zspage free and page migration
  Revert "mm/cma.c: remove redundant cma_mutex lock"
2 parents 6f66404 + 24c8e27 commit 77fb622

9 files changed

Lines changed: 103 additions & 51 deletions

File tree

arch/s390/include/asm/kexec.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@
99
#ifndef _S390_KEXEC_H
1010
#define _S390_KEXEC_H
1111

12+
#include <linux/module.h>
13+
1214
#include <asm/processor.h>
1315
#include <asm/page.h>
1416
#include <asm/setup.h>
@@ -83,4 +85,12 @@ struct kimage_arch {
8385
extern const struct kexec_file_ops s390_kexec_image_ops;
8486
extern const struct kexec_file_ops s390_kexec_elf_ops;
8587

88+
#ifdef CONFIG_KEXEC_FILE
89+
struct purgatory_info;
90+
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
91+
Elf_Shdr *section,
92+
const Elf_Shdr *relsec,
93+
const Elf_Shdr *symtab);
94+
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
95+
#endif
8696
#endif /*_S390_KEXEC_H */

arch/x86/include/asm/kexec.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -186,6 +186,14 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
186186
extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
187187
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
188188

189+
#ifdef CONFIG_KEXEC_FILE
190+
struct purgatory_info;
191+
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
192+
Elf_Shdr *section,
193+
const Elf_Shdr *relsec,
194+
const Elf_Shdr *symtab);
195+
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
196+
#endif
189197
#endif
190198

191199
typedef void crash_vmclear_fn(void);

include/linux/kexec.h

Lines changed: 38 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -193,14 +193,6 @@ void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
193193
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
194194
unsigned long buf_len);
195195
void *arch_kexec_kernel_image_load(struct kimage *image);
196-
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
197-
Elf_Shdr *section,
198-
const Elf_Shdr *relsec,
199-
const Elf_Shdr *symtab);
200-
int arch_kexec_apply_relocations(struct purgatory_info *pi,
201-
Elf_Shdr *section,
202-
const Elf_Shdr *relsec,
203-
const Elf_Shdr *symtab);
204196
int arch_kimage_file_post_load_cleanup(struct kimage *image);
205197
#ifdef CONFIG_KEXEC_SIG
206198
int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
@@ -229,6 +221,44 @@ extern int crash_exclude_mem_range(struct crash_mem *mem,
229221
unsigned long long mend);
230222
extern int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
231223
void **addr, unsigned long *sz);
224+
225+
#ifndef arch_kexec_apply_relocations_add
226+
/*
227+
* arch_kexec_apply_relocations_add - apply relocations of type RELA
228+
* @pi: Purgatory to be relocated.
229+
* @section: Section relocations applying to.
230+
* @relsec: Section containing RELAs.
231+
* @symtab: Corresponding symtab.
232+
*
233+
* Return: 0 on success, negative errno on error.
234+
*/
235+
static inline int
236+
arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
237+
const Elf_Shdr *relsec, const Elf_Shdr *symtab)
238+
{
239+
pr_err("RELA relocation unsupported.\n");
240+
return -ENOEXEC;
241+
}
242+
#endif
243+
244+
#ifndef arch_kexec_apply_relocations
245+
/*
246+
* arch_kexec_apply_relocations - apply relocations of type REL
247+
* @pi: Purgatory to be relocated.
248+
* @section: Section relocations applying to.
249+
* @relsec: Section containing RELs.
250+
* @symtab: Corresponding symtab.
251+
*
252+
* Return: 0 on success, negative errno on error.
253+
*/
254+
static inline int
255+
arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
256+
const Elf_Shdr *relsec, const Elf_Shdr *symtab)
257+
{
258+
pr_err("REL relocation unsupported.\n");
259+
return -ENOEXEC;
260+
}
261+
#endif
232262
#endif /* CONFIG_KEXEC_FILE */
233263

234264
#ifdef CONFIG_KEXEC_ELF

kernel/kexec_file.c

Lines changed: 0 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -108,40 +108,6 @@ int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
108108
}
109109
#endif
110110

111-
/*
112-
* arch_kexec_apply_relocations_add - apply relocations of type RELA
113-
* @pi: Purgatory to be relocated.
114-
* @section: Section relocations applying to.
115-
* @relsec: Section containing RELAs.
116-
* @symtab: Corresponding symtab.
117-
*
118-
* Return: 0 on success, negative errno on error.
119-
*/
120-
int __weak
121-
arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
122-
const Elf_Shdr *relsec, const Elf_Shdr *symtab)
123-
{
124-
pr_err("RELA relocation unsupported.\n");
125-
return -ENOEXEC;
126-
}
127-
128-
/*
129-
* arch_kexec_apply_relocations - apply relocations of type REL
130-
* @pi: Purgatory to be relocated.
131-
* @section: Section relocations applying to.
132-
* @relsec: Section containing RELs.
133-
* @symtab: Corresponding symtab.
134-
*
135-
* Return: 0 on success, negative errno on error.
136-
*/
137-
int __weak
138-
arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
139-
const Elf_Shdr *relsec, const Elf_Shdr *symtab)
140-
{
141-
pr_err("REL relocation unsupported.\n");
142-
return -ENOEXEC;
143-
}
144-
145111
/*
146112
* Free up memory used by kernel, initrd, and command line. This is temporary
147113
* memory allocation which is not needed any more after these buffers have

mm/cma.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@
3737

3838
struct cma cma_areas[MAX_CMA_AREAS];
3939
unsigned cma_area_count;
40+
static DEFINE_MUTEX(cma_mutex);
4041

4142
phys_addr_t cma_get_base(const struct cma *cma)
4243
{
@@ -468,9 +469,10 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
468469
spin_unlock_irq(&cma->lock);
469470

470471
pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
472+
mutex_lock(&cma_mutex);
471473
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
472474
GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
473-
475+
mutex_unlock(&cma_mutex);
474476
if (ret == 0) {
475477
page = pfn_to_page(pfn);
476478
break;

mm/hugetlb.c

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6755,7 +6755,14 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
67556755
pud_clear(pud);
67566756
put_page(virt_to_page(ptep));
67576757
mm_dec_nr_pmds(mm);
6758-
*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
6758+
/*
6759+
* This update of passed address optimizes loops sequentially
6760+
* processing addresses in increments of huge page size (PMD_SIZE
6761+
* in this case). By clearing the pud, a PUD_SIZE area is unmapped.
6762+
* Update address to the 'last page' in the cleared area so that
6763+
* calling loop can move to first page past this area.
6764+
*/
6765+
*addr |= PUD_SIZE - PMD_SIZE;
67596766
return 1;
67606767
}
67616768

mm/page_alloc.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5324,8 +5324,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
53245324
page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
53255325
pcp, pcp_list);
53265326
if (unlikely(!page)) {
5327-
/* Try and get at least one page */
5328-
if (!nr_populated)
5327+
/* Try and allocate at least one page */
5328+
if (!nr_account)
53295329
goto failed_irq;
53305330
break;
53315331
}

mm/page_table_check.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -234,11 +234,11 @@ void __page_table_check_pte_clear_range(struct mm_struct *mm,
234234
pte_t *ptep = pte_offset_map(&pmd, addr);
235235
unsigned long i;
236236

237-
pte_unmap(ptep);
238237
for (i = 0; i < PTRS_PER_PTE; i++) {
239238
__page_table_check_pte_clear(mm, addr, *ptep);
240239
addr += PAGE_SIZE;
241240
ptep++;
242241
}
242+
pte_unmap(ptep - PTRS_PER_PTE);
243243
}
244244
}

mm/zsmalloc.c

Lines changed: 33 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1718,11 +1718,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
17181718
*/
17191719
static void lock_zspage(struct zspage *zspage)
17201720
{
1721-
struct page *page = get_first_page(zspage);
1721+
struct page *curr_page, *page;
17221722

1723-
do {
1724-
lock_page(page);
1725-
} while ((page = get_next_page(page)) != NULL);
1723+
/*
1724+
* Pages we haven't locked yet can be migrated off the list while we're
1725+
* trying to lock them, so we need to be careful and only attempt to
1726+
* lock each page under migrate_read_lock(). Otherwise, the page we lock
1727+
* may no longer belong to the zspage. This means that we may wait for
1728+
* the wrong page to unlock, so we must take a reference to the page
1729+
* prior to waiting for it to unlock outside migrate_read_lock().
1730+
*/
1731+
while (1) {
1732+
migrate_read_lock(zspage);
1733+
page = get_first_page(zspage);
1734+
if (trylock_page(page))
1735+
break;
1736+
get_page(page);
1737+
migrate_read_unlock(zspage);
1738+
wait_on_page_locked(page);
1739+
put_page(page);
1740+
}
1741+
1742+
curr_page = page;
1743+
while ((page = get_next_page(curr_page))) {
1744+
if (trylock_page(page)) {
1745+
curr_page = page;
1746+
} else {
1747+
get_page(page);
1748+
migrate_read_unlock(zspage);
1749+
wait_on_page_locked(page);
1750+
put_page(page);
1751+
migrate_read_lock(zspage);
1752+
}
1753+
}
1754+
migrate_read_unlock(zspage);
17261755
}
17271756

17281757
static int zs_init_fs_context(struct fs_context *fc)

0 commit comments

Comments (0)