
Commit cc39f38

Peter Zijlstra authored and committed
seqlock: Introduce scoped_seqlock_read()
The read_seqbegin/need_seqretry/done_seqretry API is cumbersome and
error prone. With the new helper the "typical" code like

	int seq, nextseq;
	unsigned long flags;

	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&seqlock, &seq);
		// read-side critical section
		nextseq = 1;
	} while (need_seqretry(&seqlock, seq));
	done_seqretry_irqrestore(&seqlock, seq, flags);

can be rewritten as

	scoped_seqlock_read (&seqlock, ss_lock_irqsave) {
		// read-side critical section
	}

Original idea by Oleg Nesterov; with contributions from Linus.

Originally-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
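The same simplification applies to the non-irqsave pattern. As a sketch
(using the existing read_seqbegin_or_lock()/need_seqretry()/done_seqretry()
helpers; this illustration is not part of the commit message itself):

	int seq, nextseq;

	nextseq = 0;
	do {
		seq = nextseq;
		read_seqbegin_or_lock(&seqlock, &seq);
		// read-side critical section
		nextseq = 1;
	} while (need_seqretry(&seqlock, seq));
	done_seqretry(&seqlock, seq);

becomes

	scoped_seqlock_read (&seqlock, ss_lock) {
		// read-side critical section
	}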
1 parent 28a0ee3 commit cc39f38

1 file changed: include/linux/seqlock.h

Lines changed: 111 additions & 0 deletions
@@ -1209,4 +1209,115 @@ done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
 	if (seq & 1)
 		read_sequnlock_excl_irqrestore(lock, flags);
 }
+
+enum ss_state {
+	ss_done = 0,
+	ss_lock,
+	ss_lock_irqsave,
+	ss_lockless,
+};
+
+struct ss_tmp {
+	enum ss_state state;
+	unsigned long data;
+	spinlock_t *lock;
+	spinlock_t *lock_irqsave;
+};
+
+static inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+{
+	if (sst->lock)
+		spin_unlock(sst->lock);
+	if (sst->lock_irqsave)
+		spin_unlock_irqrestore(sst->lock_irqsave, sst->data);
+}
+
+extern void __scoped_seqlock_invalid_target(void);
+
+#if defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 90000
+/*
+ * For some reason some GCC-8 architectures (nios2, alpha) have trouble
+ * determining that the ss_done state is impossible in __scoped_seqlock_next()
+ * below.
+ */
+static inline void __scoped_seqlock_bug(void) { }
+#else
+/*
+ * Canary for compiler optimization -- if the compiler doesn't realize this is
+ * an impossible state, it very likely generates sub-optimal code here.
+ */
+extern void __scoped_seqlock_bug(void);
+#endif
+
+static inline void
+__scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
+{
+	switch (sst->state) {
+	case ss_done:
+		__scoped_seqlock_bug();
+		return;
+
+	case ss_lock:
+	case ss_lock_irqsave:
+		sst->state = ss_done;
+		return;
+
+	case ss_lockless:
+		if (!read_seqretry(lock, sst->data)) {
+			sst->state = ss_done;
+			return;
+		}
+		break;
+	}
+
+	switch (target) {
+	case ss_done:
+		__scoped_seqlock_invalid_target();
+		return;
+
+	case ss_lock:
+		sst->lock = &lock->lock;
+		spin_lock(sst->lock);
+		sst->state = ss_lock;
+		return;
+
+	case ss_lock_irqsave:
+		sst->lock_irqsave = &lock->lock;
+		spin_lock_irqsave(sst->lock_irqsave, sst->data);
+		sst->state = ss_lock_irqsave;
+		return;
+
+	case ss_lockless:
+		sst->data = read_seqbegin(lock);
+		return;
+	}
+}
+
+#define __scoped_seqlock_read(_seqlock, _target, _s)			\
+	for (struct ss_tmp _s __cleanup(__scoped_seqlock_cleanup) =	\
+	     { .state = ss_lockless, .data = read_seqbegin(_seqlock) }; \
+	     _s.state != ss_done;					\
+	     __scoped_seqlock_next(&_s, _seqlock, _target))
+
+/**
+ * scoped_seqlock_read (lock, ss_state) - execute the read side critical
+ *					  section without manual sequence
+ *					  counter handling or calls to other
+ *					  helpers
+ * @lock: pointer to seqlock_t protecting the data
+ * @ss_state: one of {ss_lock, ss_lock_irqsave, ss_lockless} indicating
+ *	      the type of critical read section
+ *
+ * Example:
+ *
+ *	scoped_seqlock_read (&lock, ss_lock) {
+ *		// read-side critical section
+ *	}
+ *
+ * Starts with a lockless pass first. If it fails, restarts the critical
+ * section with the lock held.
+ */
+#define scoped_seqlock_read(_seqlock, _target)				\
+	__scoped_seqlock_read(_seqlock, _target, __UNIQUE_ID(seqlock))
+
 #endif /* __LINUX_SEQLOCK_H */
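
As a usage illustration, here is a sketch of a hypothetical caller (the
stats_lock/stats_a/stats_b names are made up for this example and do not
appear in the commit): a reader snapshots two values, first lockless, and
transparently retries with the spinlock held if a writer keeps interfering.

	/* Hypothetical example, not part of the diff above. */
	static DEFINE_SEQLOCK(stats_lock);
	static u64 stats_a, stats_b;

	static void stats_snapshot(u64 *a, u64 *b)
	{
		scoped_seqlock_read (&stats_lock, ss_lock) {
			/*
			 * The first pass runs lockless after read_seqbegin();
			 * if read_seqretry() detects a concurrent write, the
			 * body re-runs with the internal spinlock held.
			 */
			*a = stats_a;
			*b = stats_b;
		}
	}

Because the loop variable is declared with __cleanup(), leaving the scope
early (e.g. via return) still drops the spinlock if one was taken.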
