Skip to content

Commit d2b1f6d

Browse files
committed
s390/cmpxchg: get rid of gcc atomic builtins
s390 is the only architecture in the kernel which makes use of gcc's atomic builtin functions. Even though I don't see any technical problem with that right now, remove this code and open-code compare-and-swap loops again, as every other architecture does. We can switch to a generic implementation once other architectures do the same. See also https://lwn.net/Articles/586838/ for further details. This basically reverts commit f318a12 ("s390/cmpxchg: use compiler builtins"). Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
1 parent b23eb63 commit d2b1f6d

1 file changed

Lines changed: 150 additions & 15 deletions

File tree

arch/s390/include/asm/cmpxchg.h

Lines changed: 150 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -12,26 +12,163 @@
1212
#include <linux/types.h>
1313
#include <linux/bug.h>
1414

15+
void __xchg_called_with_bad_pointer(void);
16+
17+
static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
18+
{
19+
unsigned long addr, old;
20+
int shift;
21+
22+
switch (size) {
23+
case 1:
24+
addr = (unsigned long) ptr;
25+
shift = (3 ^ (addr & 3)) << 3;
26+
addr ^= addr & 3;
27+
asm volatile(
28+
" l %0,%1\n"
29+
"0: lr 0,%0\n"
30+
" nr 0,%3\n"
31+
" or 0,%2\n"
32+
" cs %0,0,%1\n"
33+
" jl 0b\n"
34+
: "=&d" (old), "+Q" (*(int *) addr)
35+
: "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
36+
: "memory", "cc", "0");
37+
return old >> shift;
38+
case 2:
39+
addr = (unsigned long) ptr;
40+
shift = (2 ^ (addr & 2)) << 3;
41+
addr ^= addr & 2;
42+
asm volatile(
43+
" l %0,%1\n"
44+
"0: lr 0,%0\n"
45+
" nr 0,%3\n"
46+
" or 0,%2\n"
47+
" cs %0,0,%1\n"
48+
" jl 0b\n"
49+
: "=&d" (old), "+Q" (*(int *) addr)
50+
: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
51+
: "memory", "cc", "0");
52+
return old >> shift;
53+
case 4:
54+
asm volatile(
55+
" l %0,%1\n"
56+
"0: cs %0,%2,%1\n"
57+
" jl 0b\n"
58+
: "=&d" (old), "+Q" (*(int *) ptr)
59+
: "d" (x)
60+
: "memory", "cc");
61+
return old;
62+
case 8:
63+
asm volatile(
64+
" lg %0,%1\n"
65+
"0: csg %0,%2,%1\n"
66+
" jl 0b\n"
67+
: "=&d" (old), "+S" (*(long *) ptr)
68+
: "d" (x)
69+
: "memory", "cc");
70+
return old;
71+
}
72+
__xchg_called_with_bad_pointer();
73+
return x;
74+
}
75+
76+
/*
 * Type-preserving front end for __xchg(): exchanges *ptr with x and
 * returns the previous value, cast back to the pointed-to type.
 */
#define xchg(ptr, x)						\
({								\
	__typeof__(*(ptr)) __ret;				\
								\
	__ret = (__typeof__(*(ptr)))				\
		__xchg((unsigned long)(x), (void *)(ptr),	\
		       sizeof(*(ptr)));				\
	__ret;							\
})
84+
85+
void __cmpxchg_called_with_bad_pointer(void);
86+
87+
static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
88+
unsigned long new, int size)
89+
{
90+
unsigned long addr, prev, tmp;
91+
int shift;
92+
93+
switch (size) {
94+
case 1:
95+
addr = (unsigned long) ptr;
96+
shift = (3 ^ (addr & 3)) << 3;
97+
addr ^= addr & 3;
98+
asm volatile(
99+
" l %0,%2\n"
100+
"0: nr %0,%5\n"
101+
" lr %1,%0\n"
102+
" or %0,%3\n"
103+
" or %1,%4\n"
104+
" cs %0,%1,%2\n"
105+
" jnl 1f\n"
106+
" xr %1,%0\n"
107+
" nr %1,%5\n"
108+
" jnz 0b\n"
109+
"1:"
110+
: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
111+
: "d" ((old & 0xff) << shift),
112+
"d" ((new & 0xff) << shift),
113+
"d" (~(0xff << shift))
114+
: "memory", "cc");
115+
return prev >> shift;
116+
case 2:
117+
addr = (unsigned long) ptr;
118+
shift = (2 ^ (addr & 2)) << 3;
119+
addr ^= addr & 2;
120+
asm volatile(
121+
" l %0,%2\n"
122+
"0: nr %0,%5\n"
123+
" lr %1,%0\n"
124+
" or %0,%3\n"
125+
" or %1,%4\n"
126+
" cs %0,%1,%2\n"
127+
" jnl 1f\n"
128+
" xr %1,%0\n"
129+
" nr %1,%5\n"
130+
" jnz 0b\n"
131+
"1:"
132+
: "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
133+
: "d" ((old & 0xffff) << shift),
134+
"d" ((new & 0xffff) << shift),
135+
"d" (~(0xffff << shift))
136+
: "memory", "cc");
137+
return prev >> shift;
138+
case 4:
139+
asm volatile(
140+
" cs %0,%3,%1\n"
141+
: "=&d" (prev), "+Q" (*(int *) ptr)
142+
: "0" (old), "d" (new)
143+
: "memory", "cc");
144+
return prev;
145+
case 8:
146+
asm volatile(
147+
" csg %0,%3,%1\n"
148+
: "=&d" (prev), "+S" (*(long *) ptr)
149+
: "0" (old), "d" (new)
150+
: "memory", "cc");
151+
return prev;
152+
}
153+
__cmpxchg_called_with_bad_pointer();
154+
return old;
155+
}
156+
15157
/*
 * Type-preserving front end for __cmpxchg(): compare *ptr with o and,
 * if equal, store n; the previous value is returned cast back to the
 * pointed-to type.
 */
#define cmpxchg(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
								\
	__ret = (__typeof__(*(ptr)))				\
		__cmpxchg((ptr), (unsigned long)(o),		\
			  (unsigned long)(n), sizeof(*(ptr)));	\
	__ret;							\
})
21166

22167
#define cmpxchg64 cmpxchg
23168
#define cmpxchg_local cmpxchg
24-
#define cmpxchg64_local cmpxchg
169+
#define cmpxchg64_local cmpxchg
25170

26-
#define xchg(ptr, x) \
27-
({ \
28-
__typeof__(ptr) __ptr = (ptr); \
29-
__typeof__(*(ptr)) __old; \
30-
do { \
31-
__old = *__ptr; \
32-
} while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \
33-
__old; \
34-
})
171+
#define system_has_cmpxchg_double() 1
35172

36173
#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
37174
({ \
@@ -61,6 +198,4 @@
61198
__cmpxchg_double(__p1, __p2, o1, o2, n1, n2); \
62199
})
63200

64-
#define system_has_cmpxchg_double() 1
65-
66201
#endif /* __ASM_CMPXCHG_H */

0 commit comments

Comments
 (0)