Commit 0001742

s390/atomic,cmpxchg: switch to use atomic-instrumented.h
Add arch_ prefix to all atomic operations, and define ARCH_ATOMIC. This enables KASAN instrumentation for all atomic operations on s390. This is the s390 variant of commit 8bf705d ("locking/atomic/x86: Switch atomic.h to use atomic-instrumented.h").

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
1 parent d2b1f6d commit 0001742
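For readers unfamiliar with the mechanism: once every operation carries the arch_ prefix, the generic include/asm-generic/atomic-instrumented.h header can define the familiar un-prefixed names as thin wrappers that report each access to KASAN/KCSAN before delegating to the architecture's implementation. A minimal sketch of that wrapper pattern follows; the exact instrumentation hook names have varied between kernel releases, so treat this as illustrative rather than a verbatim copy of the header:

static __always_inline int
atomic_read(const atomic_t *v)
{
	/* report the read to KASAN/KCSAN, then do the real access */
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline void
atomic_add(int i, atomic_t *v)
{
	/* a read-modify-write access touches the counter both ways */
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}

Because the wrapping happens once in the generic header, s390 gets KASAN coverage for every atomic call site without touching the callers.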

2 files changed: 58 additions & 30 deletions

arch/s390/include/asm/atomic.h

Lines changed: 52 additions & 24 deletions
@@ -15,41 +15,46 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-static inline int atomic_read(const atomic_t *v)
+static inline int arch_atomic_read(const atomic_t *v)
 {
 	return __atomic_read(v);
 }
+#define arch_atomic_read arch_atomic_read
 
-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
 {
 	__atomic_set(v, i);
 }
+#define arch_atomic_set arch_atomic_set
 
-static inline int atomic_add_return(int i, atomic_t *v)
+static inline int arch_atomic_add_return(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter) + i;
 }
+#define arch_atomic_add_return arch_atomic_add_return
 
-static inline int atomic_fetch_add(int i, atomic_t *v)
+static inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter);
 }
+#define arch_atomic_fetch_add arch_atomic_fetch_add
 
-static inline void atomic_add(int i, atomic_t *v)
+static inline void arch_atomic_add(int i, atomic_t *v)
 {
 	__atomic_add(i, &v->counter);
 }
+#define arch_atomic_add arch_atomic_add
 
-#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
-#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
-#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)
+#define arch_atomic_sub(_i, _v)		arch_atomic_add(-(int)(_i), _v)
+#define arch_atomic_sub_return(_i, _v)	arch_atomic_add_return(-(int)(_i), _v)
+#define arch_atomic_fetch_sub(_i, _v)	arch_atomic_fetch_add(-(int)(_i), _v)
 
 #define ATOMIC_OPS(op)						\
-static inline void atomic_##op(int i, atomic_t *v)		\
+static inline void arch_atomic_##op(int i, atomic_t *v)	\
 {								\
 	__atomic_##op(i, &v->counter);				\
 }								\
-static inline int atomic_fetch_##op(int i, atomic_t *v)	\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)	\
 {								\
 	return __atomic_##op##_barrier(i, &v->counter);	\
 }
@@ -60,53 +65,67 @@ ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
 
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_and			arch_atomic_and
+#define arch_atomic_or			arch_atomic_or
+#define arch_atomic_xor			arch_atomic_xor
+#define arch_atomic_fetch_and		arch_atomic_fetch_and
+#define arch_atomic_fetch_or		arch_atomic_fetch_or
+#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
 
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
+
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return __atomic_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
 
 #define ATOMIC64_INIT(i)  { (i) }
 
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 	return __atomic64_read(v);
 }
+#define arch_atomic64_read arch_atomic64_read
 
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 	__atomic64_set(v, i);
 }
+#define arch_atomic64_set arch_atomic64_set
 
-static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
+#define arch_atomic64_add_return arch_atomic64_add_return
 
-static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter);
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
-static inline void atomic64_add(s64 i, atomic64_t *v)
+static inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	__atomic64_add(i, (long *)&v->counter);
 }
+#define arch_atomic64_add arch_atomic64_add
 
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
 	return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
 #define ATOMIC64_OPS(op)						\
-static inline void atomic64_##op(s64 i, atomic64_t *v)			\
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
 {									\
 	__atomic64_##op(i, (long *)&v->counter);			\
 }									\
-static inline long atomic64_fetch_##op(s64 i, atomic64_t *v)		\
+static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
 {									\
 	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
 }
@@ -117,8 +136,17 @@ ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
 
-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)
+#define arch_atomic64_and		arch_atomic64_and
+#define arch_atomic64_or		arch_atomic64_or
+#define arch_atomic64_xor		arch_atomic64_xor
+#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
+
+#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v)
+#define arch_atomic64_fetch_sub(_i, _v)	 arch_atomic64_fetch_add(-(s64)(_i), _v)
+#define arch_atomic64_sub(_i, _v)	 arch_atomic64_add(-(s64)(_i), _v)
+
+#define ARCH_ATOMIC
 
 #endif /* __ARCH_S390_ATOMIC__ */
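A note on the self-referential defines above (#define arch_atomic_read arch_atomic_read and friends): the generic fallback layer uses #ifdef to detect which operations an architecture actually provides and synthesizes the missing ones. A simplified sketch of that detection pattern, modeled on include/linux/atomic-arch-fallback.h:

/* if the arch only has the fully ordered op, relaxed falls back to it */
#ifndef arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add
#endif

/* derived operation: andnot built from and, unless the arch has its own */
#ifndef arch_atomic_andnot
static __always_inline void
arch_atomic_andnot(int i, atomic_t *v)
{
	arch_atomic_and(~i, v);
}
#define arch_atomic_andnot arch_atomic_andnot
#endif

Without the same-named defines in this patch, the fallback layer could not tell the s390 implementations apart from missing ones.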

arch/s390/include/asm/cmpxchg.h

Lines changed: 6 additions & 6 deletions
@@ -73,7 +73,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
 	return x;
 }
 
-#define xchg(ptr, x)						\
+#define arch_xchg(ptr, x)					\
 ({								\
 	__typeof__(*(ptr)) __ret;				\
 								\
@@ -154,7 +154,7 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
 	return old;
 }
 
-#define cmpxchg(ptr, o, n)					\
+#define arch_cmpxchg(ptr, o, n)					\
 ({								\
 	__typeof__(*(ptr)) __ret;				\
 								\
@@ -164,9 +164,9 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
 	__ret;							\
 })
 
-#define cmpxchg64		cmpxchg
-#define cmpxchg_local		cmpxchg
-#define cmpxchg64_local		cmpxchg
+#define arch_cmpxchg64		arch_cmpxchg
+#define arch_cmpxchg_local	arch_cmpxchg
+#define arch_cmpxchg64_local	arch_cmpxchg
 
 #define system_has_cmpxchg_double()	1
 
@@ -188,7 +188,7 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
 	!cc;							\
 })
 
-#define cmpxchg_double(p1, p2, o1, o2, n1, n2)			\
+#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2)		\
 ({								\
 	__typeof__(p1) __p1 = (p1);				\
 	__typeof__(p2) __p2 = (p2);				\
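The xchg/cmpxchg family is wrapped the same way: with the arch_ variants in place, the un-prefixed xchg() and cmpxchg() that kernel code calls come from atomic-instrumented.h, roughly as below (simplified sketch; the __ai_ptr temporary follows the generic header's naming, and the exact instrumentation hook has varied across releases):

#define xchg(ptr, ...)						\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	/* report the full-width access before swapping */	\
	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));	\
	arch_xchg(__ai_ptr, __VA_ARGS__);			\
})

#define cmpxchg(ptr, ...)					\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));	\
	arch_cmpxchg(__ai_ptr, __VA_ARGS__);			\
})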
