Skip to content

Commit e29a491

Browse files
Frederic Weisbecker authored and paulmckrcu committed
srcu: Debug NMI safety even on archs that don't require it
Currently the NMI safety debugging is only performed on architectures that don't support NMI-safe this_cpu_inc(). Reorder the code so that other architectures like x86 also detect bad uses. [ paulmck: Apply kernel test robot, Stephen Rothwell, and Zqiang feedback. ] Signed-off-by: Frederic Weisbecker <frederic@kernel.org> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent ae3c070 commit e29a491

3 files changed

Lines changed: 34 additions & 31 deletions

File tree

include/linux/srcu.h

Lines changed: 24 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -65,14 +65,14 @@ unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp);
6565
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie);
6666

6767
#ifdef CONFIG_NEED_SRCU_NMI_SAFE
68-
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe) __acquires(ssp);
69-
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe) __releases(ssp);
68+
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp);
69+
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp);
7070
#else
71-
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
71+
static inline int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
7272
{
7373
return __srcu_read_lock(ssp);
7474
}
75-
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
75+
static inline void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
7676
{
7777
__srcu_read_unlock(ssp, idx);
7878
}
@@ -118,6 +118,18 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
118118

119119
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
120120

121+
#define SRCU_NMI_UNKNOWN 0x0
122+
#define SRCU_NMI_UNSAFE 0x1
123+
#define SRCU_NMI_SAFE 0x2
124+
125+
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU)
126+
void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe);
127+
#else
128+
static inline void srcu_check_nmi_safety(struct srcu_struct *ssp,
129+
bool nmi_safe) { }
130+
#endif
131+
132+
121133
/**
122134
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
123135
* @p: the pointer to fetch and protect for later dereferencing
@@ -175,6 +187,7 @@ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
175187
{
176188
int retval;
177189

190+
srcu_check_nmi_safety(ssp, false);
178191
retval = __srcu_read_lock(ssp);
179192
rcu_lock_acquire(&(ssp)->dep_map);
180193
return retval;
@@ -191,10 +204,8 @@ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp
191204
{
192205
int retval;
193206

194-
if (IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
195-
retval = __srcu_read_lock_nmisafe(ssp, true);
196-
else
197-
retval = __srcu_read_lock(ssp);
207+
srcu_check_nmi_safety(ssp, true);
208+
retval = __srcu_read_lock_nmisafe(ssp);
198209
rcu_lock_acquire(&(ssp)->dep_map);
199210
return retval;
200211
}
@@ -205,6 +216,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
205216
{
206217
int retval;
207218

219+
srcu_check_nmi_safety(ssp, false);
208220
retval = __srcu_read_lock(ssp);
209221
return retval;
210222
}
@@ -220,6 +232,7 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
220232
__releases(ssp)
221233
{
222234
WARN_ON_ONCE(idx & ~0x1);
235+
srcu_check_nmi_safety(ssp, false);
223236
rcu_lock_release(&(ssp)->dep_map);
224237
__srcu_read_unlock(ssp, idx);
225238
}
@@ -235,17 +248,16 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
235248
__releases(ssp)
236249
{
237250
WARN_ON_ONCE(idx & ~0x1);
251+
srcu_check_nmi_safety(ssp, true);
238252
rcu_lock_release(&(ssp)->dep_map);
239-
if (IS_ENABLED(CONFIG_NEED_SRCU_NMI_SAFE))
240-
__srcu_read_unlock_nmisafe(ssp, idx, true);
241-
else
242-
__srcu_read_unlock(ssp, idx);
253+
__srcu_read_unlock_nmisafe(ssp, idx);
243254
}
244255

245256
/* Used by tracing, cannot be traced and cannot call lockdep. */
246257
static inline notrace void
247258
srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
248259
{
260+
srcu_check_nmi_safety(ssp, false);
249261
__srcu_read_unlock(ssp, idx);
250262
}
251263

include/linux/srcutree.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -43,10 +43,6 @@ struct srcu_data {
4343
struct srcu_struct *ssp;
4444
};
4545

46-
#define SRCU_NMI_UNKNOWN 0x0
47-
#define SRCU_NMI_NMI_UNSAFE 0x1
48-
#define SRCU_NMI_NMI_SAFE 0x2
49-
5046
/*
5147
* Node in SRCU combining tree, similar in function to rcu_data.
5248
*/

kernel/rcu/srcutree.c

Lines changed: 10 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -631,17 +631,16 @@ void cleanup_srcu_struct(struct srcu_struct *ssp)
631631
}
632632
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
633633

634+
#ifdef CONFIG_PROVE_RCU
634635
/*
635636
* Check for consistent NMI safety.
636637
*/
637-
static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
638+
void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
638639
{
639640
int nmi_safe_mask = 1 << nmi_safe;
640641
int old_nmi_safe_mask;
641642
struct srcu_data *sdp;
642643

643-
if (!IS_ENABLED(CONFIG_PROVE_RCU))
644-
return;
645644
/* NMI-unsafe use in NMI is a bad sign */
646645
WARN_ON_ONCE(!nmi_safe && in_nmi());
647646
sdp = raw_cpu_ptr(ssp->sda);
@@ -652,6 +651,8 @@ static void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
652651
}
653652
WARN_ONCE(old_nmi_safe_mask != nmi_safe_mask, "CPU %d old state %d new state %d\n", sdp->cpu, old_nmi_safe_mask, nmi_safe_mask);
654653
}
654+
EXPORT_SYMBOL_GPL(srcu_check_nmi_safety);
655+
#endif /* CONFIG_PROVE_RCU */
655656

656657
/*
657658
* Counts the new reader in the appropriate per-CPU element of the
@@ -665,7 +666,6 @@ int __srcu_read_lock(struct srcu_struct *ssp)
665666
idx = READ_ONCE(ssp->srcu_idx) & 0x1;
666667
this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
667668
smp_mb(); /* B */ /* Avoid leaking the critical section. */
668-
srcu_check_nmi_safety(ssp, false);
669669
return idx;
670670
}
671671
EXPORT_SYMBOL_GPL(__srcu_read_lock);
@@ -679,7 +679,6 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
679679
{
680680
smp_mb(); /* C */ /* Avoid leaking the critical section. */
681681
this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
682-
srcu_check_nmi_safety(ssp, false);
683682
}
684683
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
685684

@@ -690,16 +689,14 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
690689
* srcu_struct, but in an NMI-safe manner using RMW atomics.
691690
* Returns an index that must be passed to the matching srcu_read_unlock().
692691
*/
693-
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp, bool chknmisafe)
692+
int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
694693
{
695694
int idx;
696695
struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
697696

698697
idx = READ_ONCE(ssp->srcu_idx) & 0x1;
699698
atomic_long_inc(&sdp->srcu_lock_count[idx]);
700699
smp_mb__after_atomic(); /* B */ /* Avoid leaking the critical section. */
701-
if (chknmisafe)
702-
srcu_check_nmi_safety(ssp, true);
703700
return idx;
704701
}
705702
EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
@@ -709,14 +706,12 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock_nmisafe);
709706
* element of the srcu_struct. Note that this may well be a different
710707
* CPU than that which was incremented by the corresponding srcu_read_lock().
711708
*/
712-
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx, bool chknmisafe)
709+
void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
713710
{
714711
struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
715712

716713
smp_mb__before_atomic(); /* C */ /* Avoid leaking the critical section. */
717714
atomic_long_inc(&sdp->srcu_unlock_count[idx]);
718-
if (chknmisafe)
719-
srcu_check_nmi_safety(ssp, true);
720715
}
721716
EXPORT_SYMBOL_GPL(__srcu_read_unlock_nmisafe);
722717

@@ -1163,7 +1158,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11631158
* SRCU read-side critical section so that the grace-period
11641159
* sequence number cannot wrap around in the meantime.
11651160
*/
1166-
idx = __srcu_read_lock_nmisafe(ssp, false);
1161+
idx = __srcu_read_lock_nmisafe(ssp);
11671162
ss_state = smp_load_acquire(&ssp->srcu_size_state);
11681163
if (ss_state < SRCU_SIZE_WAIT_CALL)
11691164
sdp = per_cpu_ptr(ssp->sda, 0);
@@ -1196,7 +1191,7 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
11961191
srcu_funnel_gp_start(ssp, sdp, s, do_norm);
11971192
else if (needexp)
11981193
srcu_funnel_exp_start(ssp, sdp_mynode, s);
1199-
__srcu_read_unlock_nmisafe(ssp, idx, false);
1194+
__srcu_read_unlock_nmisafe(ssp, idx);
12001195
return s;
12011196
}
12021197

@@ -1500,13 +1495,13 @@ void srcu_barrier(struct srcu_struct *ssp)
15001495
/* Initial count prevents reaching zero until all CBs are posted. */
15011496
atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
15021497

1503-
idx = __srcu_read_lock_nmisafe(ssp, false);
1498+
idx = __srcu_read_lock_nmisafe(ssp);
15041499
if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
15051500
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
15061501
else
15071502
for_each_possible_cpu(cpu)
15081503
srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1509-
__srcu_read_unlock_nmisafe(ssp, idx, false);
1504+
__srcu_read_unlock_nmisafe(ssp, idx);
15101505

15111506
/* Remove the initial count, at which point reaching zero can happen. */
15121507
if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))

0 commit comments

Comments (0)