
Commit a15286c

Merge tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 - Core locking & atomics:

     - Convert all architectures to ARCH_ATOMIC: move every architecture
       to ARCH_ATOMIC, then get rid of ARCH_ATOMIC and all the transitory
       facilities and #ifdefs. Much reduction in complexity from that
       series:

           63 files changed, 756 insertions(+), 4094 deletions(-)

     - Self-test enhancements

 - Futexes:

     - Add the new FUTEX_LOCK_PI2 ABI, which is a variant that doesn't set
       FLAGS_CLOCKRT (i.e. uses CLOCK_MONOTONIC).

       [ The temptation to repurpose FUTEX_LOCK_PI's implicit setting of
         FLAGS_CLOCKRT & invert the flag's meaning to avoid having to
         introduce a new variant was resisted successfully. ]

     - Enhance futex self-tests

 - Lockdep:

     - Fix dependency path printouts
     - Optimize trace saving
     - Broaden & fix wait-context checks

 - Misc cleanups and fixes.

* tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
  locking/lockdep: Correct the description error for check_redundant()
  futex: Provide FUTEX_LOCK_PI2 to support clock selection
  futex: Prepare futex_lock_pi() for runtime clock selection
  lockdep/selftest: Remove wait-type RCU_CALLBACK tests
  lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING
  lockdep: Fix wait-type for empty stack
  locking/selftests: Add a selftest for check_irq_usage()
  lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage()
  locking/lockdep: Remove the unnecessary trace saving
  locking/lockdep: Fix the dep path printing for backwards BFS
  selftests: futex: Add futex compare requeue test
  selftests: futex: Add futex wait test
  seqlock: Remove trailing semicolon in macros
  locking/lockdep: Reduce LOCKDEP dependency list
  locking/lockdep,doc: Improve readability of the block matrix
  locking/atomics: atomic-instrumented: simplify ifdeffery
  locking/atomic: delete !ARCH_ATOMIC remnants
  locking/atomic: xtensa: move to ARCH_ATOMIC
  locking/atomic: sparc: move to ARCH_ATOMIC
  locking/atomic: sh: move to ARCH_ATOMIC
  ...
2 parents b89c07d + 0e8a89d commit a15286c

77 files changed

Lines changed: 1395 additions & 4157 deletions
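The FUTEX_LOCK_PI2 item in the pull message is the one user-visible ABI addition in this merge. Below is a minimal userspace sketch of how the new op might be called; it is illustrative only and not part of the commit. The fallback value 13 for FUTEX_LOCK_PI2 matches the uapi definition added by this series but may be missing from older headers, the helper name futex_lock_pi2() is invented for the example, and the syscall fails with ENOSYS on kernels that predate the new op.

#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#ifndef FUTEX_LOCK_PI2
#define FUTEX_LOCK_PI2 13	/* assumed op number; check your uapi headers */
#endif

/* FUTEX_LOCK_PI interprets the absolute timeout against CLOCK_REALTIME;
 * FUTEX_LOCK_PI2 uses CLOCK_MONOTONIC unless FUTEX_CLOCK_REALTIME is
 * or'ed into the op. The val argument is unused for the PI lock ops. */
static int futex_lock_pi2(uint32_t *uaddr, const struct timespec *abs_timeout)
{
	return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2, 0, abs_timeout, NULL, 0);
}

int main(void)
{
	uint32_t futex_word = 0;	/* 0 == unlocked, uncontended */
	struct timespec deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += 1;		/* give up after one second */

	if (futex_lock_pi2(&futex_word, &deadline) == -1)
		perror("FUTEX_LOCK_PI2");
	else
		printf("acquired; the futex word now holds our TID\n");
	return 0;
}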


Documentation/locking/lockdep-design.rst

Lines changed: 2 additions & 2 deletions
@@ -453,9 +453,9 @@ There are simply four block conditions:
 Block condition matrix, Y means the row blocks the column, and N means otherwise.
 
     +---+---+---+---+
-    |   | E | r | R |
+    |   | W | r | R |
     +---+---+---+---+
-    | E | Y | Y | Y |
+    | W | Y | Y | Y |
     +---+---+---+---+
     | r | Y | Y | N |
     +---+---+---+---+

arch/alpha/include/asm/atomic.h

Lines changed: 47 additions & 41 deletions
@@ -26,11 +26,11 @@
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-#define atomic_read(v)		READ_ONCE((v)->counter)
-#define atomic64_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic64_read(v)	READ_ONCE((v)->counter)
 
-#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
 
 /*
  * To get proper branch prediction for the main line, we must branch
@@ -39,7 +39,7 @@
  */
 
 #define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
 { \
 	unsigned long temp; \
 	__asm__ __volatile__( \
@@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 } \
 
 #define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
 { \
 	long temp, result; \
 	__asm__ __volatile__( \
@@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
 }
 
 #define ATOMIC_FETCH_OP(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 { \
 	long temp, result; \
 	__asm__ __volatile__( \
@@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 }
 
 #define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
 { \
 	s64 temp; \
 	__asm__ __volatile__( \
@@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
 } \
 
 #define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
 { \
 	s64 temp, result; \
 	__asm__ __volatile__( \
@@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
 }
 
 #define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
 { \
 	s64 temp, result; \
 	__asm__ __volatile__( \
@@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed
 
-#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed
 
-#define atomic_andnot atomic_andnot
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic_andnot			arch_atomic_andnot
+#define arch_atomic64_andnot			arch_atomic64_andnot
 
 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, asm) \
@@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
 ATOMIC_OPS(or, bis)
 ATOMIC_OPS(xor, xor)
 
-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed
 
-#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
 #undef ATOMIC64_FETCH_OP
@@ -198,22 +200,26 @@ ATOMIC_OPS(xor, xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) \
+	(arch_xchg(&((v)->counter), new))
 
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) \
+	(arch_xchg(&((v)->counter), new))
 
 /**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, new, old;
 	smp_mb();
@@ -234,18 +240,18 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 	smp_mb();
 	return old;
 }
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
 /**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	s64 c, new, old;
 	smp_mb();
@@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 	smp_mb();
 	return old;
 }
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 
 /*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic_t
  *
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	s64 old, tmp;
 	smp_mb();
@@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 	smp_mb();
 	return old - 1;
 }
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
 #endif /* _ALPHA_ATOMIC_H */
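The mechanical arch_ rename above is the per-architecture half of the ARCH_ATOMIC conversion described in the pull message: the architecture now only provides arch_atomic_*(), and a generic instrumented layer (generated by scripts/atomic/gen-atomic-instrumented.sh) supplies the atomic_*() API the rest of the kernel calls, inserting KASAN/KCSAN checks on the way through. The following standalone sketch shows that layering only; GCC builtins stand in for the alpha assembly and the instrumentation hooks are stubbed out so it compiles in userspace — it is not the kernel's actual generated code.

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Architecture layer: what alpha now provides under the arch_ prefix
 * (here faked with compiler builtins instead of LL/SC assembly). */
#define arch_atomic_read(v)	__atomic_load_n(&(v)->counter, __ATOMIC_RELAXED)
static inline void arch_atomic_add(int i, atomic_t *v)
{
	__atomic_fetch_add(&v->counter, i, __ATOMIC_RELAXED);
}

/* Generic layer: the wrappers the rest of the kernel calls. In the kernel
 * these are generated and invoke real KASAN/KCSAN hooks; stubbed here. */
#define instrument_atomic_read(v, size)		((void)0)
#define instrument_atomic_read_write(v, size)	((void)0)

static inline int atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static inline void atomic_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}

int main(void)
{
	atomic_t v = { 0 };

	atomic_add(3, &v);
	printf("%d\n", atomic_read(&v));	/* prints 3 */
	return 0;
}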

arch/alpha/include/asm/cmpxchg.h

Lines changed: 6 additions & 6 deletions
@@ -17,7 +17,7 @@
 			       sizeof(*(ptr))); \
 })
 
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
 ({ \
 	__typeof__(*(ptr)) _o_ = (o); \
 	__typeof__(*(ptr)) _n_ = (n); \
@@ -26,7 +26,7 @@
 			  sizeof(*(ptr))); \
 })
 
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
 ({ \
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
 	cmpxchg_local((ptr), (o), (n)); \
@@ -42,7 +42,7 @@
  * The leading and the trailing memory barriers guarantee that these
  * operations are fully ordered.
  */
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
 ({ \
 	__typeof__(*(ptr)) __ret; \
 	__typeof__(*(ptr)) _x_ = (x); \
@@ -53,7 +53,7 @@
 	__ret; \
 })
 
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
 ({ \
 	__typeof__(*(ptr)) __ret; \
 	__typeof__(*(ptr)) _o_ = (o); \
@@ -65,10 +65,10 @@
 	__ret; \
 })
 
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
 ({ \
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-	cmpxchg((ptr), (o), (n)); \
+	arch_cmpxchg((ptr), (o), (n)); \
 })
 
 #undef ____cmpxchg
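The cmpxchg.h rename follows the same split as the atomic ops: the architecture keeps the implementation under arch_xchg()/arch_cmpxchg(), and the un-prefixed xchg()/cmpxchg() used by generic code become generated wrappers that add instrumentation before dispatching to the arch_ variants. The snippet below is a paraphrase of what such a generated wrapper looked like around this merge, not a verbatim excerpt, and the hook name may differ between kernel versions.

/* Approximate shape of the generated xchg() wrapper (kernel context,
 * not standalone); the real one lives in the generated
 * atomic-instrumented header. */
#define xchg(ptr, ...)						\
({								\
	typeof(ptr) __ai_ptr = (ptr);				\
	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));	\
	arch_xchg(__ai_ptr, __VA_ARGS__);			\
})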
