Skip to content

Commit 85c653f

Browse files
author
Marc Zyngier
committed
Merge branch arm64/for-next/caches into kvmarm-master/next
arm64 cache management function cleanup from Fuad Tabba, shared with the arm64 tree. * arm64/for-next/caches: arm64: Rename arm64-internal cache maintenance functions arm64: Fix cache maintenance function comments arm64: sync_icache_aliases to take end parameter instead of size arm64: __clean_dcache_area_pou to take end parameter instead of size arm64: __clean_dcache_area_pop to take end parameter instead of size arm64: __clean_dcache_area_poc to take end parameter instead of size arm64: __flush_dcache_area to take end parameter instead of size arm64: dcache_by_line_op to take end parameter instead of size arm64: __inval_dcache_area to take end parameter instead of size arm64: Fix comments to refer to correct function __flush_icache_range arm64: Move documentation of dcache_by_line_op arm64: assembler: remove user_alt arm64: Downgrade flush_icache_range to invalidate arm64: Do not enable uaccess for invalidate_icache_range arm64: Do not enable uaccess for flush_icache_range arm64: Apply errata to swsusp_arch_suspend_exit arm64: assembler: add conditional cache fixups arm64: assembler: replace `kaddr` with `addr` Signed-off-by: Marc Zyngier <maz@kernel.org>
2 parents cb5faa8 + fade9c2 commit 85c653f

28 files changed

Lines changed: 282 additions & 221 deletions

arch/arm64/include/asm/alternative-macros.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -197,11 +197,6 @@ alternative_endif
197197
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
198198
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
199199

200-
.macro user_alt, label, oldinstr, newinstr, cond
201-
9999: alternative_insn "\oldinstr", "\newinstr", \cond
202-
_asm_extable 9999b, \label
203-
.endm
204-
205200
#endif /* __ASSEMBLY__ */
206201

207202
/*

arch/arm64/include/asm/arch_gicv3.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
124124
#define gic_read_lpir(c) readq_relaxed(c)
125125
#define gic_write_lpir(v, c) writeq_relaxed(v, c)
126126

127-
#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
127+
#define gic_flush_dcache_to_poc(a,l) \
128+
dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
128129

129130
#define gits_read_baser(c) readq_relaxed(c)
130131
#define gits_write_baser(v, c) writeq_relaxed(v, c)

arch/arm64/include/asm/assembler.h

Lines changed: 48 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -130,15 +130,27 @@ alternative_endif
130130
.endm
131131

132132
/*
133-
* Emit an entry into the exception table
133+
* Create an exception table entry for `insn`, which will branch to `fixup`
134+
* when an unhandled fault is taken.
134135
*/
135-
.macro _asm_extable, from, to
136+
.macro _asm_extable, insn, fixup
136137
.pushsection __ex_table, "a"
137138
.align 3
138-
.long (\from - .), (\to - .)
139+
.long (\insn - .), (\fixup - .)
139140
.popsection
140141
.endm
141142

143+
/*
144+
* Create an exception table entry for `insn` if `fixup` is provided. Otherwise
145+
* do nothing.
146+
*/
147+
.macro _cond_extable, insn, fixup
148+
.ifnc \fixup,
149+
_asm_extable \insn, \fixup
150+
.endif
151+
.endm
152+
153+
142154
#define USER(l, x...) \
143155
9999: x; \
144156
_asm_extable 9999b, l
@@ -375,72 +387,76 @@ alternative_cb_end
375387
bfi \tcr, \tmp0, \pos, #3
376388
.endm
377389

390+
.macro __dcache_op_workaround_clean_cache, op, addr
391+
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
392+
dc \op, \addr
393+
alternative_else
394+
dc civac, \addr
395+
alternative_endif
396+
.endm
397+
378398
/*
379399
* Macro to perform a data cache maintenance for the interval
380-
* [kaddr, kaddr + size)
400+
* [start, end)
381401
*
382402
* op: operation passed to dc instruction
383403
* domain: domain used in dsb instruction
384-
* kaddr: starting virtual address of the region
385-
* size: size of the region
386-
* Corrupts: kaddr, size, tmp1, tmp2
404+
* start: starting virtual address of the region
405+
* end: end virtual address of the region
406+
* fixup: optional label to branch to on user fault
407+
* Corrupts: start, end, tmp1, tmp2
387408
*/
388-
.macro __dcache_op_workaround_clean_cache, op, kaddr
389-
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
390-
dc \op, \kaddr
391-
alternative_else
392-
dc civac, \kaddr
393-
alternative_endif
394-
.endm
395-
396-
.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
409+
.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
397410
dcache_line_size \tmp1, \tmp2
398-
add \size, \kaddr, \size
399411
sub \tmp2, \tmp1, #1
400-
bic \kaddr, \kaddr, \tmp2
401-
9998:
412+
bic \start, \start, \tmp2
413+
.Ldcache_op\@:
402414
.ifc \op, cvau
403-
__dcache_op_workaround_clean_cache \op, \kaddr
415+
__dcache_op_workaround_clean_cache \op, \start
404416
.else
405417
.ifc \op, cvac
406-
__dcache_op_workaround_clean_cache \op, \kaddr
418+
__dcache_op_workaround_clean_cache \op, \start
407419
.else
408420
.ifc \op, cvap
409-
sys 3, c7, c12, 1, \kaddr // dc cvap
421+
sys 3, c7, c12, 1, \start // dc cvap
410422
.else
411423
.ifc \op, cvadp
412-
sys 3, c7, c13, 1, \kaddr // dc cvadp
424+
sys 3, c7, c13, 1, \start // dc cvadp
413425
.else
414-
dc \op, \kaddr
426+
dc \op, \start
415427
.endif
416428
.endif
417429
.endif
418430
.endif
419-
add \kaddr, \kaddr, \tmp1
420-
cmp \kaddr, \size
421-
b.lo 9998b
431+
add \start, \start, \tmp1
432+
cmp \start, \end
433+
b.lo .Ldcache_op\@
422434
dsb \domain
435+
436+
_cond_extable .Ldcache_op\@, \fixup
423437
.endm
424438

425439
/*
426440
* Macro to perform an instruction cache maintenance for the interval
427441
* [start, end)
428442
*
429443
* start, end: virtual addresses describing the region
430-
* label: A label to branch to on user fault.
444+
* fixup: optional label to branch to on user fault
431445
* Corrupts: tmp1, tmp2
432446
*/
433-
.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
447+
.macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
434448
icache_line_size \tmp1, \tmp2
435449
sub \tmp2, \tmp1, #1
436450
bic \tmp2, \start, \tmp2
437-
9997:
438-
USER(\label, ic ivau, \tmp2) // invalidate I line PoU
451+
.Licache_op\@:
452+
ic ivau, \tmp2 // invalidate I line PoU
439453
add \tmp2, \tmp2, \tmp1
440454
cmp \tmp2, \end
441-
b.lo 9997b
455+
b.lo .Licache_op\@
442456
dsb ish
443457
isb
458+
459+
_cond_extable .Licache_op\@, \fixup
444460
.endm
445461

446462
/*

arch/arm64/include/asm/cacheflush.h

Lines changed: 42 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -30,45 +30,58 @@
3030
* the implementation assumes non-aliasing VIPT D-cache and (aliasing)
3131
* VIPT I-cache.
3232
*
33-
* flush_icache_range(start, end)
33+
* All functions below apply to the interval [start, end)
34+
* - start - virtual start address (inclusive)
35+
* - end - virtual end address (exclusive)
3436
*
35-
* Ensure coherency between the I-cache and the D-cache in the
36-
* region described by start, end.
37-
* - start - virtual start address
38-
* - end - virtual end address
37+
* caches_clean_inval_pou(start, end)
3938
*
40-
* invalidate_icache_range(start, end)
39+
* Ensure coherency between the I-cache and the D-cache region to
40+
* the Point of Unification.
4141
*
42-
* Invalidate the I-cache in the region described by start, end.
43-
* - start - virtual start address
44-
* - end - virtual end address
42+
* caches_clean_inval_user_pou(start, end)
4543
*
46-
* __flush_cache_user_range(start, end)
44+
* Ensure coherency between the I-cache and the D-cache region to
45+
* the Point of Unification.
46+
* Use only if the region might access user memory.
4747
*
48-
* Ensure coherency between the I-cache and the D-cache in the
49-
* region described by start, end.
50-
* - start - virtual start address
51-
* - end - virtual end address
48+
* icache_inval_pou(start, end)
5249
*
53-
* __flush_dcache_area(kaddr, size)
50+
* Invalidate I-cache region to the Point of Unification.
5451
*
55-
* Ensure that the data held in page is written back.
56-
* - kaddr - page address
57-
* - size - region size
52+
* dcache_clean_inval_poc(start, end)
53+
*
54+
* Clean and invalidate D-cache region to the Point of Coherency.
55+
*
56+
* dcache_inval_poc(start, end)
57+
*
58+
* Invalidate D-cache region to the Point of Coherency.
59+
*
60+
* dcache_clean_poc(start, end)
61+
*
62+
* Clean D-cache region to the Point of Coherency.
63+
*
64+
* dcache_clean_pop(start, end)
65+
*
66+
* Clean D-cache region to the Point of Persistence.
67+
*
68+
* dcache_clean_pou(start, end)
69+
*
70+
* Clean D-cache region to the Point of Unification.
5871
*/
59-
extern void __flush_icache_range(unsigned long start, unsigned long end);
60-
extern int invalidate_icache_range(unsigned long start, unsigned long end);
61-
extern void __flush_dcache_area(void *addr, size_t len);
62-
extern void __inval_dcache_area(void *addr, size_t len);
63-
extern void __clean_dcache_area_poc(void *addr, size_t len);
64-
extern void __clean_dcache_area_pop(void *addr, size_t len);
65-
extern void __clean_dcache_area_pou(void *addr, size_t len);
66-
extern long __flush_cache_user_range(unsigned long start, unsigned long end);
67-
extern void sync_icache_aliases(void *kaddr, unsigned long len);
72+
extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
73+
extern void icache_inval_pou(unsigned long start, unsigned long end);
74+
extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
75+
extern void dcache_inval_poc(unsigned long start, unsigned long end);
76+
extern void dcache_clean_poc(unsigned long start, unsigned long end);
77+
extern void dcache_clean_pop(unsigned long start, unsigned long end);
78+
extern void dcache_clean_pou(unsigned long start, unsigned long end);
79+
extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
80+
extern void sync_icache_aliases(unsigned long start, unsigned long end);
6881

6982
static inline void flush_icache_range(unsigned long start, unsigned long end)
7083
{
71-
__flush_icache_range(start, end);
84+
caches_clean_inval_pou(start, end);
7285

7386
/*
7487
* IPI all online CPUs so that they undergo a context synchronization
@@ -122,7 +135,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
122135
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
123136
extern void flush_dcache_page(struct page *);
124137

125-
static __always_inline void __flush_icache_all(void)
138+
static __always_inline void icache_inval_all_pou(void)
126139
{
127140
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
128141
return;

arch/arm64/include/asm/efi.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ void efi_virtmap_unload(void);
137137

138138
static inline void efi_capsule_flush_cache_range(void *addr, int size)
139139
{
140-
__flush_dcache_area(addr, size);
140+
dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
141141
}
142142

143143
#endif /* _ASM_EFI_H */

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -180,7 +180,8 @@ static inline void *__kvm_vector_slot2addr(void *base,
180180

181181
struct kvm;
182182

183-
#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
183+
#define kvm_flush_dcache_to_poc(a,l) \
184+
dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
184185

185186
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
186187
{
@@ -205,11 +206,10 @@ static inline void __invalidate_icache_guest_page(void *va, size_t size)
205206
{
206207
if (icache_is_aliasing()) {
207208
/* any kind of VIPT cache */
208-
__flush_icache_all();
209+
icache_inval_all_pou();
209210
} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
210211
/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
211-
invalidate_icache_range((unsigned long)va,
212-
(unsigned long)va + size);
212+
icache_inval_pou((unsigned long)va, (unsigned long)va + size);
213213
}
214214
}
215215

arch/arm64/kernel/alternative.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,7 @@ static void __nocfi __apply_alternatives(struct alt_region *region, bool is_modu
181181
*/
182182
if (!is_module) {
183183
dsb(ish);
184-
__flush_icache_all();
184+
icache_inval_all_pou();
185185
isb();
186186

187187
/* Ignore ARM64_CB bit from feature mask */

arch/arm64/kernel/efi-entry.S

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -28,16 +28,17 @@ SYM_CODE_START(efi_enter_kernel)
2828
* stale icache entries from before relocation.
2929
*/
3030
ldr w1, =kernel_size
31-
bl __clean_dcache_area_poc
31+
add x1, x0, x1
32+
bl dcache_clean_poc
3233
ic ialluis
3334

3435
/*
3536
* Clean the remainder of this routine to the PoC
3637
* so that we can safely disable the MMU and caches.
3738
*/
3839
adr x0, 0f
39-
ldr w1, 3f
40-
bl __clean_dcache_area_poc
40+
adr x1, 3f
41+
bl dcache_clean_poc
4142
0:
4243
/* Turn off Dcache and MMU */
4344
mrs x0, CurrentEL
@@ -64,5 +65,5 @@ SYM_CODE_START(efi_enter_kernel)
6465
mov x2, xzr
6566
mov x3, xzr
6667
br x19
68+
3:
6769
SYM_CODE_END(efi_enter_kernel)
68-
3: .long . - 0b

arch/arm64/kernel/head.S

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -117,8 +117,8 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
117117
dmb sy // needed before dc ivac with
118118
// MMU off
119119

120-
mov x1, #0x20 // 4 x 8 bytes
121-
b __inval_dcache_area // tail call
120+
add x1, x0, #0x20 // 4 x 8 bytes
121+
b dcache_inval_poc // tail call
122122
SYM_CODE_END(preserve_boot_args)
123123

124124
/*
@@ -268,8 +268,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
268268
*/
269269
adrp x0, init_pg_dir
270270
adrp x1, init_pg_end
271-
sub x1, x1, x0
272-
bl __inval_dcache_area
271+
bl dcache_inval_poc
273272

274273
/*
275274
* Clear the init page tables.
@@ -382,13 +381,11 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
382381

383382
adrp x0, idmap_pg_dir
384383
adrp x1, idmap_pg_end
385-
sub x1, x1, x0
386-
bl __inval_dcache_area
384+
bl dcache_inval_poc
387385

388386
adrp x0, init_pg_dir
389387
adrp x1, init_pg_end
390-
sub x1, x1, x0
391-
bl __inval_dcache_area
388+
bl dcache_inval_poc
392389

393390
ret x28
394391
SYM_FUNC_END(__create_page_tables)

arch/arm64/kernel/hibernate-asm.S

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@
4545
* Because this code has to be copied to a 'safe' page, it can't call out to
4646
* other functions by PC-relative address. Also remember that it may be
4747
* mid-way through over-writing other functions. For this reason it contains
48-
* code from flush_icache_range() and uses the copy_page() macro.
48+
* code from caches_clean_inval_pou() and uses the copy_page() macro.
4949
*
5050
* This 'safe' page is mapped via ttbr0, and executed from there. This function
5151
* switches to a copy of the linear map in ttbr1, performs the restore, then
@@ -87,11 +87,12 @@ SYM_CODE_START(swsusp_arch_suspend_exit)
8787
copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
8888

8989
add x1, x10, #PAGE_SIZE
90-
/* Clean the copied page to PoU - based on flush_icache_range() */
90+
/* Clean the copied page to PoU - based on caches_clean_inval_pou() */
9191
raw_dcache_line_size x2, x3
9292
sub x3, x2, #1
9393
bic x4, x10, x3
94-
2: dc cvau, x4 /* clean D line / unified line */
94+
2: /* clean D line / unified line */
95+
alternative_insn "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
9596
add x4, x4, x2
9697
cmp x4, x1
9798
b.lo 2b

0 commit comments

Comments (0)