Commit 6cbf5b3

Merge tag 'locking-core-2024-01-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:

 "Lock guards:

   - Use lock guards in the ptrace code

   - Introduce conditional guards to extend to conditional lock
     primitives like mutex_trylock()/mutex_lock_interruptible()/etc.

  lockdep:

   - Optimize 'struct lock_class' to be smaller

   - Update file patterns in MAINTAINERS

  mutexes:

   - Document mutex lifetime rules a bit more"

* tag 'locking-core-2024-01-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/mutex: Clarify that mutex_unlock(), and most other sleeping locks, can still use the lock object after it's unlocked
  locking/mutex: Document that mutex_unlock() is non-atomic
  ptrace: Convert ptrace_attach() to use lock guards
  locking/lockdep: Slightly reorder 'struct lock_class' to save some memory
  MAINTAINERS: Add include/linux/lockdep*.h
  cleanup: Add conditional guard support
2 parents f0a78b3 + 2b9d9e0 commit 6cbf5b3

10 files changed

Lines changed: 184 additions & 77 deletions

Documentation/locking/mutex-design.rst

Lines changed: 18 additions & 0 deletions
@@ -101,6 +101,24 @@ features that make lock debugging easier and faster:
 - Detects multi-task circular deadlocks and prints out all affected
   locks and tasks (and only those tasks).
 
+Mutexes - and most other sleeping locks like rwsems - do not provide an
+implicit reference for the memory they occupy, which reference is released
+with mutex_unlock().
+
+[ This is in contrast with spin_unlock() [or completion_done()], which
+  APIs can be used to guarantee that the memory is not touched by the
+  lock implementation after spin_unlock()/completion_done() releases
+  the lock. ]
+
+mutex_unlock() may access the mutex structure even after it has internally
+released the lock already - so it's not safe for another context to
+acquire the mutex and assume that the mutex_unlock() context is not using
+the structure anymore.
+
+The mutex user must ensure that the mutex is not destroyed while a
+release operation is still in progress - in other words, callers of
+mutex_unlock() must ensure that the mutex stays alive until mutex_unlock()
+has returned.
 
 Interfaces
 ----------
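To make the new lifetime rule concrete, here is a hedged sketch of the race it forbids - the structure and functions are invented for illustration, not taken from the patch:

/* Hypothetical objects illustrating the mutex lifetime rule above. */
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
	struct mutex	lock;
	bool		dead;
};

/*
 * UNSAFE: as soon as this mutex_unlock() internally releases the lock,
 * the waiter in obj_reap() can acquire it, observe obj->dead and free
 * the object - while this mutex_unlock() may still be accessing
 * obj->lock (e.g. to process its wait list).
 */
static void obj_kill(struct obj *obj)
{
	mutex_lock(&obj->lock);
	obj->dead = true;
	mutex_unlock(&obj->lock);
}

static void obj_reap(struct obj *obj)
{
	bool dead;

	mutex_lock(&obj->lock);
	dead = obj->dead;
	mutex_unlock(&obj->lock);

	if (dead)
		kfree(obj);	/* can overlap obj_kill()'s unlock: use-after-free */
}

The conventional fix is to pin the object with a separate reference count that is dropped only after mutex_unlock() has returned; a sketch of that pattern follows the kernel/locking/mutex.c hunk below.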

MAINTAINERS

Lines changed: 1 addition & 1 deletion
@@ -12424,7 +12424,7 @@ S: Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
 F:	Documentation/locking/
 F:	arch/*/include/asm/spinlock*.h
-F:	include/linux/lockdep.h
+F:	include/linux/lockdep*.h
 F:	include/linux/mutex*.h
 F:	include/linux/rwlock*.h
 F:	include/linux/rwsem*.h

include/linux/cleanup.h

Lines changed: 49 additions & 3 deletions
@@ -125,25 +125,55 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
  * trivial wrapper around DEFINE_CLASS() above specifically
  * for locks.
  *
+ * DEFINE_GUARD_COND(name, ext, condlock)
+ *	wrapper around EXTEND_CLASS above to add conditional lock
+ *	variants to a base class, eg. mutex_trylock() or
+ *	mutex_lock_interruptible().
+ *
  * guard(name):
- *	an anonymous instance of the (guard) class
+ *	an anonymous instance of the (guard) class, not recommended for
+ *	conditional locks.
  *
  * scoped_guard (name, args...) { }:
  *	similar to CLASS(name, scope)(args), except the variable (with the
  *	explicit name 'scope') is declard in a for-loop such that its scope is
  *	bound to the next (compound) statement.
  *
+ *	for conditional locks the loop body is skipped when the lock is not
+ *	acquired.
+ *
+ * scoped_cond_guard (name, fail, args...) { }:
+ *	similar to scoped_guard(), except it does fail when the lock
+ *	acquire fails.
+ *
  */
 
 #define DEFINE_GUARD(_name, _type, _lock, _unlock) \
-	DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
+	DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
+	static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
+	{ return *_T; }
+
+#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
+	EXTEND_CLASS(_name, _ext, \
+		     ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
+		     class_##_name##_t _T) \
+	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+	{ return class_##_name##_lock_ptr(_T); }
 
 #define guard(_name) \
 	CLASS(_name, __UNIQUE_ID(guard))
 
+#define __guard_ptr(_name) class_##_name##_lock_ptr
+
 #define scoped_guard(_name, args...) \
 	for (CLASS(_name, scope)(args), \
-	     *done = NULL; !done; done = (void *)1)
+	     *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1)
+
+#define scoped_cond_guard(_name, _fail, args...) \
+	for (CLASS(_name, scope)(args), \
+	     *done = NULL; !done; done = (void *)1) \
+		if (!__guard_ptr(_name)(&scope)) _fail; \
+		else
 
 /*
  * Additional helper macros for generating lock guards with types, either for
@@ -152,6 +182,7 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
  *
  * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
  * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
  *
  * will result in the following type:
  *
@@ -173,6 +204,11 @@ typedef struct { \
 static inline void class_##_name##_destructor(class_##_name##_t *_T) \
 { \
 	if (_T->lock) { _unlock; } \
+} \
+ \
+static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
+{ \
+	return _T->lock; \
 }
 
 
@@ -201,4 +237,14 @@ __DEFINE_LOCK_GUARD_1(_name, _type, _lock)
 __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
 __DEFINE_LOCK_GUARD_0(_name, _lock)
 
+#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+	EXTEND_CLASS(_name, _ext, \
+		     ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
+		        if (_T->lock && !(_condlock)) _T->lock = NULL; \
+			_t; }), \
+		     typeof_member(class_##_name##_t, lock) l) \
+	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
+	{ return class_##_name##_lock_ptr(_T); }
+
+
 #endif /* __LINUX_GUARDS_H */
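A hedged usage sketch of the guard APIs above; data_lock, data_value and the functions are invented for illustration, while the mutex, mutex_try and mutex_intr guard classes are the ones this series defines in include/linux/mutex.h:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(data_lock);
static int data_value;

/* Unconditional guard: mutex_unlock() runs when the scope is left. */
static int data_read(void)
{
	guard(mutex)(&data_lock);
	return data_value;
}

/* Conditional guard: the body only runs if mutex_trylock() succeeded. */
static void data_poke(void)
{
	scoped_guard(mutex_try, &data_lock)
		data_value++;
	/* silently falls through when the trylock failed */
}

/* Conditional guard with an explicit failure statement. */
static int data_write(int v)
{
	scoped_cond_guard(mutex_intr, return -EINTR, &data_lock)
		data_value = v;
	return 0;
}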

include/linux/lockdep_types.h

Lines changed: 1 addition & 1 deletion
@@ -127,12 +127,12 @@ struct lock_class {
 	unsigned long		usage_mask;
 	const struct lock_trace	*usage_traces[LOCK_TRACE_STATES];
 
+	const char		*name;
 	/*
 	 * Generation counter, when doing certain classes of graph walking,
 	 * to ensure that we check one node only once:
 	 */
 	int			name_version;
-	const char		*name;
 
 	u8			wait_type_inner;
 	u8			wait_type_outer;
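Why moving 'name' up saves memory: placing the pointer next to the other naturally aligned members lets the following int and u8 fields share one aligned word instead of leaving padding holes. The toy structs below only illustrate the principle on a typical 64-bit target - they are not the real lock_class, and the exact saving there depends on the neighbouring members:

#include <stdio.h>

struct before {
	unsigned long	usage_mask;	/* 8 bytes */
	int		name_version;	/* 4 bytes + 4 bytes padding */
	const char	*name;		/* 8 bytes */
	unsigned char	wait_type_inner;/* 1 byte */
	unsigned char	wait_type_outer;/* 1 byte + 6 bytes tail padding */
};					/* typically 32 bytes */

struct after {
	unsigned long	usage_mask;	/* 8 bytes */
	const char	*name;		/* 8 bytes */
	int		name_version;	/* 4 bytes */
	unsigned char	wait_type_inner;/* 1 byte */
	unsigned char	wait_type_outer;/* 1 byte + 2 bytes tail padding */
};					/* typically 24 bytes */

int main(void)
{
	printf("before=%zu after=%zu\n", sizeof(struct before), sizeof(struct after));
	return 0;
}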

include/linux/mutex.h

Lines changed: 2 additions & 1 deletion
@@ -221,6 +221,7 @@ extern void mutex_unlock(struct mutex *lock);
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
-DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
+DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
+DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
 
 #endif /* __LINUX_MUTEX_H */

include/linux/rwsem.h

Lines changed: 4 additions & 4 deletions
@@ -203,11 +203,11 @@ extern void up_read(struct rw_semaphore *sem);
 extern void up_write(struct rw_semaphore *sem);
 
 DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
-DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
-
-DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
-DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
+DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
+DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
 
+DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
 
 /*
  * downgrade write lock to read lock
include/linux/sched/task.h

Lines changed: 2 additions & 0 deletions
@@ -226,4 +226,6 @@ static inline void task_unlock(struct task_struct *p)
 	spin_unlock(&p->alloc_lock);
 }
 
+DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+
 #endif /* _LINUX_SCHED_TASK_H */
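A hedged sketch of the new task_lock guard; the helper is invented, and real code would normally use get_task_comm() - this is only meant to show the guard form:

#include <linux/cleanup.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/string.h>

static void copy_task_comm(char buf[TASK_COMM_LEN], struct task_struct *p)
{
	guard(task_lock)(p);			/* task_unlock(p) on scope exit */
	strscpy(buf, p->comm, TASK_COMM_LEN);
}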

include/linux/spinlock.h

Lines changed: 41 additions & 0 deletions
@@ -507,6 +507,8 @@ DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
 		    raw_spin_lock(_T->lock),
 		    raw_spin_unlock(_T->lock))
 
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
+
 DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
 		    raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
 		    raw_spin_unlock(_T->lock))
@@ -515,23 +517,62 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
 		    raw_spin_lock_irq(_T->lock),
 		    raw_spin_unlock_irq(_T->lock))
 
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
+
 DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
 		    raw_spin_lock_irqsave(_T->lock, _T->flags),
 		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
 		    unsigned long flags)
 
+DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
+			 raw_spin_trylock_irqsave(_T->lock, _T->flags))
+
 DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
 		    spin_lock(_T->lock),
 		    spin_unlock(_T->lock))
 
+DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
+
 DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
 		    spin_lock_irq(_T->lock),
 		    spin_unlock_irq(_T->lock))
 
+DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
+			 spin_trylock_irq(_T->lock))
+
 DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
 		    spin_lock_irqsave(_T->lock, _T->flags),
 		    spin_unlock_irqrestore(_T->lock, _T->flags),
 		    unsigned long flags)
 
+DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
+			 spin_trylock_irqsave(_T->lock, _T->flags))
+
+DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
+		    read_lock(_T->lock),
+		    read_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
+		    read_lock_irq(_T->lock),
+		    read_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
+		    read_lock_irqsave(_T->lock, _T->flags),
+		    read_unlock_irqrestore(_T->lock, _T->flags),
+		    unsigned long flags)
+
+DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
+		    write_lock(_T->lock),
+		    write_unlock(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
+		    write_lock_irq(_T->lock),
+		    write_unlock_irq(_T->lock))
+
+DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
+		    write_lock_irqsave(_T->lock, _T->flags),
+		    write_unlock_irqrestore(_T->lock, _T->flags),
+		    unsigned long flags)
+
 #undef __LINUX_INSIDE_SPINLOCK_H
 #endif /* __LINUX_SPINLOCK_H */
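A hedged usage sketch of the spinlock and rwlock guards above; the locks, counters and functions are invented for illustration:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);
static DEFINE_RWLOCK(table_lock);
static unsigned long stats_drops;
static int table[16];

/* IRQ-safe trylock guard: the block only runs if the lock was taken;
 * the flags save/restore lives inside the guard object itself. */
static bool stats_try_account(void)
{
	scoped_guard(spinlock_irqsave_try, &stats_lock) {
		stats_drops++;
		return true;
	}
	return false;			/* lock was contended */
}

/* The new rwlock guards. */
static int table_get(int i)
{
	guard(read_lock)(&table_lock);
	return table[i];
}

static void table_set(int i, int v)
{
	guard(write_lock_irq)(&table_lock);
	table[i] = v;
}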

kernel/locking/mutex.c

Lines changed: 5 additions & 0 deletions
@@ -532,6 +532,11 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
  * This function must not be used in interrupt context. Unlocking
  * of a not locked mutex is not allowed.
  *
+ * The caller must ensure that the mutex stays alive until this function has
+ * returned - mutex_unlock() can NOT directly be used to release an object such
+ * that another concurrent task can free it.
+ * Mutexes are different from spinlocks & refcounts in this aspect.
+ *
  * This function is similar to (but not equivalent to) up().
  */
 void __sched mutex_unlock(struct mutex *lock)