
Commit 79974cc

LoongArch: Add atomic operations for 32BIT/64BIT
LoongArch64 has both AMO and LL/SC instructions, while LoongArch32 only has LL/SC instructions. So we add a Kconfig option CPU_HAS_AMO here and implement atomic operations (also including local operations and percpu operations) for both 32BIT and 64BIT platforms.

Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
1 parent bf3fa8f commit 79974cc

7 files changed

Lines changed: 413 additions & 219 deletions
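
In other words, a 64-bit kernel uses the native AMO instructions (am*.w / am*.d), while a 32-bit kernel falls back to ll.w/sc.w retry loops. The header that chooses between the two implementations is not part of the excerpt below; the following is only a minimal sketch of how an arch-level <asm/atomic.h> could dispatch on CONFIG_CPU_HAS_AMO. The atomic-amo.h / atomic-llsc.h file names are assumptions inferred from the include guards in the new headers.

/*
 * Sketch only -- not taken from this commit.  One plausible way for the
 * arch's <asm/atomic.h> to pick an implementation using the new
 * CONFIG_CPU_HAS_AMO symbol (default y for 64BIT in the Kconfig hunk
 * below).  Header names are assumptions based on the include guards.
 */
#ifdef CONFIG_CPU_HAS_AMO
# include <asm/atomic-amo.h>	/* am*.w / am*.d based atomics */
#else
# include <asm/atomic-llsc.h>	/* ll.w / sc.w based atomics */
#endif
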

File tree

arch/loongarch/Kconfig

Lines changed: 4 additions & 0 deletions
@@ -568,6 +568,10 @@ config ARCH_STRICT_ALIGN
 	  to run kernel only on systems with h/w unaligned access support in
 	  order to optimise for performance.
 
+config CPU_HAS_AMO
+	bool
+	default 64BIT
+
 config CPU_HAS_FPU
 	bool
 	default y
Lines changed: 206 additions & 0 deletions
@@ -0,0 +1,206 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations (AMO).
 *
 * Copyright (C) 2020-2025 Loongson Technology Corporation Limited
 */

#ifndef _ASM_ATOMIC_AMO_H
#define _ASM_ATOMIC_AMO_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_OP(op, I, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op".w" " $zero, %1, %0		\n"			\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix)		\
static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op#mb".w" " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result c_op I;						\
}

#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix)			\
static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v)	\
{									\
	int result;							\
									\
	__asm__ __volatile__(						\
	"am"#asm_op#mb".w" " %1, %2, %0		\n"			\
	: "+ZB" (v->counter), "=&r" (result)				\
	: "r" (I)							\
	: "memory");							\
									\
	return result;							\
}

#define ATOMIC_OPS(op, I, asm_op, c_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db,         )		\
	ATOMIC_OP_RETURN(op, I, asm_op, c_op,    , _relaxed)		\
	ATOMIC_FETCH_OP(op, I, asm_op, _db,         )			\
	ATOMIC_FETCH_OP(op, I, asm_op,    , _relaxed)

ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)

#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_add_return_acquire	arch_atomic_add_return
#define arch_atomic_add_return_release	arch_atomic_add_return
#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return		arch_atomic_sub_return
#define arch_atomic_sub_return_acquire	arch_atomic_sub_return
#define arch_atomic_sub_return_release	arch_atomic_sub_return
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_add_acquire	arch_atomic_fetch_add
#define arch_atomic_fetch_add_release	arch_atomic_fetch_add
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_acquire	arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_release	arch_atomic_fetch_sub
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS

#define ATOMIC_OPS(op, I, asm_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op, _db,         )			\
	ATOMIC_FETCH_OP(op, I, asm_op,    , _relaxed)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_and_acquire	arch_atomic_fetch_and
#define arch_atomic_fetch_and_release	arch_atomic_fetch_and
#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_or_acquire	arch_atomic_fetch_or
#define arch_atomic_fetch_or_release	arch_atomic_fetch_or
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_acquire	arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_release	arch_atomic_fetch_xor
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT

#define ATOMIC64_OP(op, I, asm_op)					\
static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
{									\
	__asm__ __volatile__(						\
	"am"#asm_op".d " " $zero, %1, %0	\n"			\
	: "+ZB" (v->counter)						\
	: "r" (I)							\
	: "memory");							\
}

#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix)			\
static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v)	\
{										\
	long result;								\
	__asm__ __volatile__(							\
	"am"#asm_op#mb".d " " %1, %2, %0	\n"				\
	: "+ZB" (v->counter), "=&r" (result)					\
	: "r" (I)								\
	: "memory");								\
										\
	return result c_op I;							\
}

#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix)				\
static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v)	\
{										\
	long result;								\
										\
	__asm__ __volatile__(							\
	"am"#asm_op#mb".d " " %1, %2, %0	\n"				\
	: "+ZB" (v->counter), "=&r" (result)					\
	: "r" (I)								\
	: "memory");								\
										\
	return result;								\
}

#define ATOMIC64_OPS(op, I, asm_op, c_op)				\
	ATOMIC64_OP(op, I, asm_op)					\
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db,         )		\
	ATOMIC64_OP_RETURN(op, I, asm_op, c_op,    , _relaxed)		\
	ATOMIC64_FETCH_OP(op, I, asm_op, _db,         )			\
	ATOMIC64_FETCH_OP(op, I, asm_op,    , _relaxed)

ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)

#define arch_atomic64_add_return		arch_atomic64_add_return
#define arch_atomic64_add_return_acquire	arch_atomic64_add_return
#define arch_atomic64_add_return_release	arch_atomic64_add_return
#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return		arch_atomic64_sub_return
#define arch_atomic64_sub_return_acquire	arch_atomic64_sub_return
#define arch_atomic64_sub_return_release	arch_atomic64_sub_return
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add			arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_acquire		arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_release		arch_atomic64_fetch_add
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub			arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_acquire		arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_release		arch_atomic64_fetch_sub
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS

#define ATOMIC64_OPS(op, I, asm_op)					\
	ATOMIC64_OP(op, I, asm_op)					\
	ATOMIC64_FETCH_OP(op, I, asm_op, _db,         )			\
	ATOMIC64_FETCH_OP(op, I, asm_op,    , _relaxed)

ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)

#define arch_atomic64_fetch_and			arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_acquire		arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_release		arch_atomic64_fetch_and
#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or			arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_acquire		arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_release		arch_atomic64_fetch_or
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor			arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_acquire		arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_release		arch_atomic64_fetch_xor
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

#endif

#endif /* _ASM_ATOMIC_AMO_H */
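
To make the macro layering above concrete, here is a hand expansion of ATOMIC_OP(add, i, add) and ATOMIC_FETCH_OP(add, i, add, _db, ). This is for illustration only, not part of the patch: the void operation discards the old value into $zero, and the fully-ordered fetch variant uses the amadd_db.w form of the instruction.

/*
 * Hand expansion of two of the macros above, shown only to illustrate
 * the generated code; not part of the patch.
 */
static inline void arch_atomic_add(int i, atomic_t *v)
{
	/* amadd.w with $zero as the old-value destination: add-and-forget */
	__asm__ __volatile__(
	"amadd.w $zero, %1, %0		\n"
	: "+ZB" (v->counter)
	: "r" (i)
	: "memory");
}

static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	int result;

	/* amadd_db.w: fetch-and-add, _db variant for full ordering */
	__asm__ __volatile__(
	"amadd_db.w %1, %2, %0		\n"
	: "+ZB" (v->counter), "=&r" (result)
	: "r" (i)
	: "memory");

	return result;
}
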
Lines changed: 100 additions & 0 deletions
@@ -0,0 +1,100 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations (LLSC).
 *
 * Copyright (C) 2024-2025 Loongson Technology Corporation Limited
 */

#ifndef _ASM_ATOMIC_LLSC_H
#define _ASM_ATOMIC_LLSC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_OP(op, I, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	int temp;							\
									\
	__asm__ __volatile__(						\
	"1:	ll.w	%0, %1		# atomic_" #op "	\n"	\
	"	" #asm_op "	%0, %0, %2			\n"	\
	"	sc.w	%0, %1					\n"	\
	"	beq	%0, $r0, 1b				\n"	\
	: "=&r" (temp), "+ZC" (v->counter)				\
	: "r" (I));							\
}

#define ATOMIC_OP_RETURN(op, I, asm_op)					\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{									\
	int result, temp;						\
									\
	__asm__ __volatile__(						\
	"1:	ll.w	%1, %2		# atomic_" #op "_return	\n"	\
	"	" #asm_op "	%0, %1, %3			\n"	\
	"	sc.w	%0, %2					\n"	\
	"	beq	%0, $r0, 1b				\n"	\
	"	" #asm_op "	%0, %1, %3			\n"	\
	: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)		\
	: "r" (I));							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, I, asm_op)					\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	int result, temp;						\
									\
	__asm__ __volatile__(						\
	"1:	ll.w	%1, %2		# atomic_fetch_" #op "	\n"	\
	"	" #asm_op "	%0, %1, %3			\n"	\
	"	sc.w	%0, %2					\n"	\
	"	beq	%0, $r0, 1b				\n"	\
	"	add.w	%0, %1, $r0				\n"	\
	: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)		\
	: "r" (I));							\
									\
	return result;							\
}

#define ATOMIC_OPS(op, I, asm_op, c_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_OP_RETURN(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(add, i, add.w, +=)
ATOMIC_OPS(sub, -i, add.w, +=)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS

#define ATOMIC_OPS(op, I, asm_op)					\
	ATOMIC_OP(op, I, asm_op)					\
	ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT
#error "64-bit LLSC atomic operations are not supported"
#endif

#endif /* _ASM_ATOMIC_LLSC_H */
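
Note that this header only provides the void operations and the _relaxed return/fetch variants; the acquire/release/fully-ordered forms are presumably left to the kernel's generic atomic fallback machinery, which builds them from the _relaxed versions plus barriers. As a worked example (hand expanded for illustration, not part of the patch), ATOMIC_FETCH_OP(add, i, add.w) produces roughly:

static inline int arch_atomic_fetch_add_relaxed(int i, atomic_t *v)
{
	int result, temp;

	__asm__ __volatile__(
	"1:	ll.w	%1, %2		# temp = old value	\n"
	"	add.w	%0, %1, %3	# result = old + i	\n"
	"	sc.w	%0, %2		# try store, %0 = ok?	\n"
	"	beq	%0, $r0, 1b	# retry if sc failed	\n"
	"	add.w	%0, %1, $r0	# return the old value	\n"
	: "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
	: "r" (i));

	return result;
}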
