Skip to content

Commit a956f4e

Browse files
committed
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "Three arm64 fixes for -rc8/final. The MTE and stolen time fixes have
  been doing the rounds for a little while, but review and testing
  feedback was ongoing until earlier this week. The kexec fix showed up
  on Monday and addresses a failure observed under Qemu.

  Summary:

   - Add missing write barrier to publish MTE tags before a pte update

   - Fix kexec relocation clobbering its own data structures

   - Fix stolen time crash if a timer IRQ fires during CPU hotplug"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mte: Ensure the cleared tags are visible before setting the PTE
  arm64: kexec: load from kimage prior to clobbering
  arm64: paravirt: Use RCU read locks to guard stolen_time
2 parents 3d7285a + 1d0cb4c commit a956f4e

3 files changed

Lines changed: 39 additions & 15 deletions

File tree

arch/arm64/kernel/mte.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
7676
mte_sync_page_tags(page, old_pte, check_swap,
7777
pte_is_tagged);
7878
}
79+
80+
/* ensure the tags are visible before the PTE is set */
81+
smp_wmb();
7982
}
8083

8184
int memcmp_pages(struct page *page1, struct page *page2)

arch/arm64/kernel/paravirt.c

Lines changed: 21 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ static u64 native_steal_clock(int cpu)
3535
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
3636

3737
struct pv_time_stolen_time_region {
38-
struct pvclock_vcpu_stolen_time *kaddr;
38+
struct pvclock_vcpu_stolen_time __rcu *kaddr;
3939
};
4040

4141
static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
@@ -52,7 +52,9 @@ early_param("no-steal-acc", parse_no_stealacc);
5252
/* return stolen time in ns by asking the hypervisor */
5353
static u64 para_steal_clock(int cpu)
5454
{
55+
struct pvclock_vcpu_stolen_time *kaddr = NULL;
5556
struct pv_time_stolen_time_region *reg;
57+
u64 ret = 0;
5658

5759
reg = per_cpu_ptr(&stolen_time_region, cpu);
5860

@@ -61,28 +63,37 @@ static u64 para_steal_clock(int cpu)
6163
* online notification callback runs. Until the callback
6264
* has run we just return zero.
6365
*/
64-
if (!reg->kaddr)
66+
rcu_read_lock();
67+
kaddr = rcu_dereference(reg->kaddr);
68+
if (!kaddr) {
69+
rcu_read_unlock();
6570
return 0;
71+
}
6672

67-
return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
73+
ret = le64_to_cpu(READ_ONCE(kaddr->stolen_time));
74+
rcu_read_unlock();
75+
return ret;
6876
}
6977

7078
static int stolen_time_cpu_down_prepare(unsigned int cpu)
7179
{
80+
struct pvclock_vcpu_stolen_time *kaddr = NULL;
7281
struct pv_time_stolen_time_region *reg;
7382

7483
reg = this_cpu_ptr(&stolen_time_region);
7584
if (!reg->kaddr)
7685
return 0;
7786

78-
memunmap(reg->kaddr);
79-
memset(reg, 0, sizeof(*reg));
87+
kaddr = rcu_replace_pointer(reg->kaddr, NULL, true);
88+
synchronize_rcu();
89+
memunmap(kaddr);
8090

8191
return 0;
8292
}
8393

8494
static int stolen_time_cpu_online(unsigned int cpu)
8595
{
96+
struct pvclock_vcpu_stolen_time *kaddr = NULL;
8697
struct pv_time_stolen_time_region *reg;
8798
struct arm_smccc_res res;
8899

@@ -93,17 +104,19 @@ static int stolen_time_cpu_online(unsigned int cpu)
93104
if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
94105
return -EINVAL;
95106

96-
reg->kaddr = memremap(res.a0,
107+
kaddr = memremap(res.a0,
97108
sizeof(struct pvclock_vcpu_stolen_time),
98109
MEMREMAP_WB);
99110

111+
rcu_assign_pointer(reg->kaddr, kaddr);
112+
100113
if (!reg->kaddr) {
101114
pr_warn("Failed to map stolen time data structure\n");
102115
return -ENOMEM;
103116
}
104117

105-
if (le32_to_cpu(reg->kaddr->revision) != 0 ||
106-
le32_to_cpu(reg->kaddr->attributes) != 0) {
118+
if (le32_to_cpu(kaddr->revision) != 0 ||
119+
le32_to_cpu(kaddr->attributes) != 0) {
107120
pr_warn_once("Unexpected revision or attributes in stolen time data\n");
108121
return -ENXIO;
109122
}

arch/arm64/kernel/relocate_kernel.S

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,15 @@
3737
* safe memory that has been set up to be preserved during the copy operation.
3838
*/
3939
SYM_CODE_START(arm64_relocate_new_kernel)
40+
/*
41+
* The kimage structure isn't allocated specially and may be clobbered
42+
* during relocation. We must load any values we need from it prior to
43+
* any relocation occurring.
44+
*/
45+
ldr x28, [x0, #KIMAGE_START]
46+
ldr x27, [x0, #KIMAGE_ARCH_EL2_VECTORS]
47+
ldr x26, [x0, #KIMAGE_ARCH_DTB_MEM]
48+
4049
/* Setup the list loop variables. */
4150
ldr x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
4251
ldr x17, [x0, #KIMAGE_ARCH_TTBR1] /* x17 = linear map copy */
@@ -72,21 +81,20 @@ SYM_CODE_START(arm64_relocate_new_kernel)
7281
ic iallu
7382
dsb nsh
7483
isb
75-
ldr x4, [x0, #KIMAGE_START] /* relocation start */
76-
ldr x1, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* relocation start */
77-
ldr x0, [x0, #KIMAGE_ARCH_DTB_MEM] /* dtb address */
7884
turn_off_mmu x12, x13
7985

8086
/* Start new image. */
81-
cbz x1, .Lel1
82-
mov x1, x4 /* relocation start */
83-
mov x2, x0 /* dtb address */
87+
cbz x27, .Lel1
88+
mov x1, x28 /* kernel entry point */
89+
mov x2, x26 /* dtb address */
8490
mov x3, xzr
8591
mov x4, xzr
8692
mov x0, #HVC_SOFT_RESTART
8793
hvc #0 /* Jumps from el2 */
8894
.Lel1:
95+
mov x0, x26 /* dtb address */
96+
mov x1, xzr
8997
mov x2, xzr
9098
mov x3, xzr
91-
br x4 /* Jumps from el1 */
99+
br x28 /* Jumps from el1 */
92100
SYM_CODE_END(arm64_relocate_new_kernel)

0 commit comments

Comments (0)