Skip to content

Commit c0f6799

Browse files
committed
Merge tip:locking/core into tip:ras/core
Pick up helpers inlining work in order to address more noinstr fallout in the MCE code. Signed-off-by: Borislav Petkov <bp@suse.de>
2 parents 754e0b0 + b008893 commit c0f6799

14 files changed

Lines changed: 108 additions & 50 deletions

File tree

MAINTAINERS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3200,6 +3200,7 @@ ATOMIC INFRASTRUCTURE
32003200
M: Will Deacon <will@kernel.org>
32013201
M: Peter Zijlstra <peterz@infradead.org>
32023202
R: Boqun Feng <boqun.feng@gmail.com>
3203+
R: Mark Rutland <mark.rutland@arm.com>
32033204
L: linux-kernel@vger.kernel.org
32043205
S: Maintained
32053206
F: arch/*/include/asm/atomic*.h

arch/x86/include/asm/cpumask.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,21 @@ static __always_inline bool arch_cpu_online(int cpu)
2020
{
2121
return arch_test_bit(cpu, cpumask_bits(cpu_online_mask));
2222
}
23+
24+
static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
25+
{
26+
arch_clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
27+
}
2328
#else
2429
static __always_inline bool arch_cpu_online(int cpu)
2530
{
2631
return cpu == 0;
2732
}
33+
34+
static __always_inline void arch_cpumask_clear_cpu(int cpu, struct cpumask *dstp)
35+
{
36+
return;
37+
}
2838
#endif
2939

3040
#define arch_cpu_is_offline(cpu) unlikely(!arch_cpu_online(cpu))

arch/x86/include/asm/ptrace.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ static __always_inline int user_mode(struct pt_regs *regs)
137137
#endif
138138
}
139139

140-
static inline int v8086_mode(struct pt_regs *regs)
140+
static __always_inline int v8086_mode(struct pt_regs *regs)
141141
{
142142
#ifdef CONFIG_X86_32
143143
return (regs->flags & X86_VM_MASK);

include/asm-generic/bitops/instrumented-atomic.h

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
* Note that @nr may be almost arbitrarily large; this function is not
2424
* restricted to acting on a single-word quantity.
2525
*/
26-
static inline void set_bit(long nr, volatile unsigned long *addr)
26+
static __always_inline void set_bit(long nr, volatile unsigned long *addr)
2727
{
2828
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
2929
arch_set_bit(nr, addr);
@@ -36,7 +36,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr)
3636
*
3737
* This is a relaxed atomic operation (no implied memory barriers).
3838
*/
39-
static inline void clear_bit(long nr, volatile unsigned long *addr)
39+
static __always_inline void clear_bit(long nr, volatile unsigned long *addr)
4040
{
4141
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
4242
arch_clear_bit(nr, addr);
@@ -52,7 +52,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr)
5252
* Note that @nr may be almost arbitrarily large; this function is not
5353
* restricted to acting on a single-word quantity.
5454
*/
55-
static inline void change_bit(long nr, volatile unsigned long *addr)
55+
static __always_inline void change_bit(long nr, volatile unsigned long *addr)
5656
{
5757
instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
5858
arch_change_bit(nr, addr);
@@ -65,7 +65,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
6565
*
6666
* This is an atomic fully-ordered operation (implied full memory barrier).
6767
*/
68-
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
68+
static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
6969
{
7070
kcsan_mb();
7171
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
@@ -79,7 +79,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
7979
*
8080
* This is an atomic fully-ordered operation (implied full memory barrier).
8181
*/
82-
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
82+
static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
8383
{
8484
kcsan_mb();
8585
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
9393
*
9494
* This is an atomic fully-ordered operation (implied full memory barrier).
9595
*/
96-
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
96+
static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
9797
{
9898
kcsan_mb();
9999
instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));

include/asm-generic/bitops/instrumented-non-atomic.h

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@
2222
* region of memory concurrently, the effect may be that only one operation
2323
* succeeds.
2424
*/
25-
static inline void __set_bit(long nr, volatile unsigned long *addr)
25+
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
2626
{
2727
instrument_write(addr + BIT_WORD(nr), sizeof(long));
2828
arch___set_bit(nr, addr);
@@ -37,7 +37,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr)
3737
* region of memory concurrently, the effect may be that only one operation
3838
* succeeds.
3939
*/
40-
static inline void __clear_bit(long nr, volatile unsigned long *addr)
40+
static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
4141
{
4242
instrument_write(addr + BIT_WORD(nr), sizeof(long));
4343
arch___clear_bit(nr, addr);
@@ -52,13 +52,13 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
5252
* region of memory concurrently, the effect may be that only one operation
5353
* succeeds.
5454
*/
55-
static inline void __change_bit(long nr, volatile unsigned long *addr)
55+
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
5656
{
5757
instrument_write(addr + BIT_WORD(nr), sizeof(long));
5858
arch___change_bit(nr, addr);
5959
}
6060

61-
static inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
61+
static __always_inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
6262
{
6363
if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
6464
/*
@@ -90,7 +90,7 @@ static inline void __instrument_read_write_bitop(long nr, volatile unsigned long
9090
* This operation is non-atomic. If two instances of this operation race, one
9191
* can appear to succeed but actually fail.
9292
*/
93-
static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
93+
static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
9494
{
9595
__instrument_read_write_bitop(nr, addr);
9696
return arch___test_and_set_bit(nr, addr);
@@ -104,7 +104,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
104104
* This operation is non-atomic. If two instances of this operation race, one
105105
* can appear to succeed but actually fail.
106106
*/
107-
static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
107+
static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
108108
{
109109
__instrument_read_write_bitop(nr, addr);
110110
return arch___test_and_clear_bit(nr, addr);
@@ -118,7 +118,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
118118
* This operation is non-atomic. If two instances of this operation race, one
119119
* can appear to succeed but actually fail.
120120
*/
121-
static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
121+
static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
122122
{
123123
__instrument_read_write_bitop(nr, addr);
124124
return arch___test_and_change_bit(nr, addr);
@@ -129,7 +129,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
129129
* @nr: bit number to test
130130
* @addr: Address to start counting from
131131
*/
132-
static inline bool test_bit(long nr, const volatile unsigned long *addr)
132+
static __always_inline bool test_bit(long nr, const volatile unsigned long *addr)
133133
{
134134
instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
135135
return arch_test_bit(nr, addr);

include/linux/atomic/atomic-arch-fallback.h

Lines changed: 33 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -151,7 +151,16 @@
151151
static __always_inline int
152152
arch_atomic_read_acquire(const atomic_t *v)
153153
{
154-
return smp_load_acquire(&(v)->counter);
154+
int ret;
155+
156+
if (__native_word(atomic_t)) {
157+
ret = smp_load_acquire(&(v)->counter);
158+
} else {
159+
ret = arch_atomic_read(v);
160+
__atomic_acquire_fence();
161+
}
162+
163+
return ret;
155164
}
156165
#define arch_atomic_read_acquire arch_atomic_read_acquire
157166
#endif
@@ -160,7 +169,12 @@ arch_atomic_read_acquire(const atomic_t *v)
160169
static __always_inline void
161170
arch_atomic_set_release(atomic_t *v, int i)
162171
{
163-
smp_store_release(&(v)->counter, i);
172+
if (__native_word(atomic_t)) {
173+
smp_store_release(&(v)->counter, i);
174+
} else {
175+
__atomic_release_fence();
176+
arch_atomic_set(v, i);
177+
}
164178
}
165179
#define arch_atomic_set_release arch_atomic_set_release
166180
#endif
@@ -1258,7 +1272,16 @@ arch_atomic_dec_if_positive(atomic_t *v)
12581272
static __always_inline s64
12591273
arch_atomic64_read_acquire(const atomic64_t *v)
12601274
{
1261-
return smp_load_acquire(&(v)->counter);
1275+
s64 ret;
1276+
1277+
if (__native_word(atomic64_t)) {
1278+
ret = smp_load_acquire(&(v)->counter);
1279+
} else {
1280+
ret = arch_atomic64_read(v);
1281+
__atomic_acquire_fence();
1282+
}
1283+
1284+
return ret;
12621285
}
12631286
#define arch_atomic64_read_acquire arch_atomic64_read_acquire
12641287
#endif
@@ -1267,7 +1290,12 @@ arch_atomic64_read_acquire(const atomic64_t *v)
12671290
static __always_inline void
12681291
arch_atomic64_set_release(atomic64_t *v, s64 i)
12691292
{
1270-
smp_store_release(&(v)->counter, i);
1293+
if (__native_word(atomic64_t)) {
1294+
smp_store_release(&(v)->counter, i);
1295+
} else {
1296+
__atomic_release_fence();
1297+
arch_atomic64_set(v, i);
1298+
}
12711299
}
12721300
#define arch_atomic64_set_release arch_atomic64_set_release
12731301
#endif
@@ -2358,4 +2386,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
23582386
#endif
23592387

23602388
#endif /* _LINUX_ATOMIC_FALLBACK_H */
2361-
// cca554917d7ea73d5e3e7397dd70c484cad9b2c4
2389+
// 8e2cc06bc0d2c0967d2f8424762bd48555ee40ae

include/linux/cpumask.h

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -102,15 +102,15 @@ extern atomic_t __num_online_cpus;
102102

103103
extern cpumask_t cpus_booted_once_mask;
104104

105-
static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
105+
static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
106106
{
107107
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
108108
WARN_ON_ONCE(cpu >= bits);
109109
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
110110
}
111111

112112
/* verify cpu argument to cpumask_* operators */
113-
static inline unsigned int cpumask_check(unsigned int cpu)
113+
static __always_inline unsigned int cpumask_check(unsigned int cpu)
114114
{
115115
cpu_max_bits_warn(cpu, nr_cpumask_bits);
116116
return cpu;
@@ -341,12 +341,12 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
341341
* @cpu: cpu number (< nr_cpu_ids)
342342
* @dstp: the cpumask pointer
343343
*/
344-
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
344+
static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
345345
{
346346
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
347347
}
348348

349-
static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
349+
static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
350350
{
351351
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
352352
}
@@ -357,12 +357,12 @@ static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
357357
* @cpu: cpu number (< nr_cpu_ids)
358358
* @dstp: the cpumask pointer
359359
*/
360-
static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
360+
static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
361361
{
362362
clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
363363
}
364364

365-
static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
365+
static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
366366
{
367367
__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
368368
}
@@ -374,7 +374,7 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
374374
*
375375
* Returns 1 if @cpu is set in @cpumask, else returns 0
376376
*/
377-
static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
377+
static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
378378
{
379379
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
380380
}
@@ -388,7 +388,7 @@ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
388388
*
389389
* test_and_set_bit wrapper for cpumasks.
390390
*/
391-
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
391+
static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
392392
{
393393
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
394394
}
@@ -402,7 +402,7 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
402402
*
403403
* test_and_clear_bit wrapper for cpumasks.
404404
*/
405-
static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
405+
static __always_inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
406406
{
407407
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
408408
}

include/linux/local_lock_internal.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -44,9 +44,9 @@ static inline void local_lock_debug_init(local_lock_t *l)
4444
}
4545
#else /* CONFIG_DEBUG_LOCK_ALLOC */
4646
# define LOCAL_LOCK_DEBUG_INIT(lockname)
47-
static inline void local_lock_acquire(local_lock_t *l) { }
48-
static inline void local_lock_release(local_lock_t *l) { }
49-
static inline void local_lock_debug_init(local_lock_t *l) { }
47+
# define local_lock_acquire(__ll) do { typecheck(local_lock_t *, __ll); } while (0)
48+
# define local_lock_release(__ll) do { typecheck(local_lock_t *, __ll); } while (0)
49+
# define local_lock_debug_init(__ll) do { typecheck(local_lock_t *, __ll); } while (0)
5050
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
5151

5252
#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }

init/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2054,6 +2054,7 @@ source "arch/Kconfig"
20542054

20552055
config RT_MUTEXES
20562056
bool
2057+
default y if PREEMPT_RT
20572058

20582059
config BASE_SMALL
20592060
int

kernel/locking/lockdep.c

Lines changed: 16 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -6011,13 +6011,10 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
60116011

60126012
static void reinit_class(struct lock_class *class)
60136013
{
6014-
void *const p = class;
6015-
const unsigned int offset = offsetof(struct lock_class, key);
6016-
60176014
WARN_ON_ONCE(!class->lock_entry.next);
60186015
WARN_ON_ONCE(!list_empty(&class->locks_after));
60196016
WARN_ON_ONCE(!list_empty(&class->locks_before));
6020-
memset(p + offset, 0, sizeof(*class) - offset);
6017+
memset_startat(class, 0, key);
60216018
WARN_ON_ONCE(!class->lock_entry.next);
60226019
WARN_ON_ONCE(!list_empty(&class->locks_after));
60236020
WARN_ON_ONCE(!list_empty(&class->locks_before));
@@ -6290,7 +6287,13 @@ void lockdep_reset_lock(struct lockdep_map *lock)
62906287
lockdep_reset_lock_reg(lock);
62916288
}
62926289

6293-
/* Unregister a dynamically allocated key. */
6290+
/*
6291+
* Unregister a dynamically allocated key.
6292+
*
6293+
* Unlike lockdep_register_key(), a search is always done to find a matching
6294+
* key irrespective of debug_locks to avoid potential invalid access to freed
6295+
* memory in lock_class entry.
6296+
*/
62946297
void lockdep_unregister_key(struct lock_class_key *key)
62956298
{
62966299
struct hlist_head *hash_head = keyhashentry(key);
@@ -6305,22 +6308,22 @@ void lockdep_unregister_key(struct lock_class_key *key)
63056308
return;
63066309

63076310
raw_local_irq_save(flags);
6308-
if (!graph_lock())
6309-
goto out_irq;
6311+
lockdep_lock();
63106312

6311-
pf = get_pending_free();
63126313
hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
63136314
if (k == key) {
63146315
hlist_del_rcu(&k->hash_entry);
63156316
found = true;
63166317
break;
63176318
}
63186319
}
6319-
WARN_ON_ONCE(!found);
6320-
__lockdep_free_key_range(pf, key, 1);
6321-
call_rcu_zapped(pf);
6322-
graph_unlock();
6323-
out_irq:
6320+
WARN_ON_ONCE(!found && debug_locks);
6321+
if (found) {
6322+
pf = get_pending_free();
6323+
__lockdep_free_key_range(pf, key, 1);
6324+
call_rcu_zapped(pf);
6325+
}
6326+
lockdep_unlock();
63246327
raw_local_irq_restore(flags);
63256328

63266329
/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */

0 commit comments

Comments (0)